0398;;;;N;;;;03B8
+ #
+ # Case-insensitive comparison should treat all of them as equivalent.
+ #
+ # But .toLowerCase() doesn't change ϑ (it's already lowercase),
+ # and .toUpperCase() doesn't change ϴ (already uppercase).
+ #
+ # Applying first lower then upper case normalizes any character:
+ # '\u0398\u03f4\u03b8\u03d1'.toLowerCase().toUpperCase() === '\u0398\u0398\u0398\u0398'
+ #
+ # Note: this is equivalent to unicode case folding; unicode normalization
+ # is a different step that is not required here.
+ #
+ # Final result should be uppercased, because it's later stored in an object
+ # (this avoid a conflict with Object.prototype members,
+ # most notably, `__proto__`)
+ #
+ return string.lower().upper()
+
+
+LINK_OPEN_RE = re.compile(r"^<a[>\s]", flags=re.IGNORECASE)
+LINK_CLOSE_RE = re.compile(r"^</a\s*>", flags=re.IGNORECASE)
+
+
+def isLinkOpen(string: str) -> bool:
+ return bool(LINK_OPEN_RE.search(string))
+
+
+def isLinkClose(string: str) -> bool:
+ return bool(LINK_CLOSE_RE.search(string))
diff --git a/lib/markdown_it/helpers/__init__.py b/lib/markdown_it/helpers/__init__.py
new file mode 100644
index 0000000..f4e2cd2
--- /dev/null
+++ b/lib/markdown_it/helpers/__init__.py
@@ -0,0 +1,6 @@
+"""Functions for parsing Links"""
+
+__all__ = ("parseLinkDestination", "parseLinkLabel", "parseLinkTitle")
+from .parse_link_destination import parseLinkDestination
+from .parse_link_label import parseLinkLabel
+from .parse_link_title import parseLinkTitle
diff --git a/lib/markdown_it/helpers/__pycache__/__init__.cpython-314.pyc b/lib/markdown_it/helpers/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..2c0595b
Binary files /dev/null and b/lib/markdown_it/helpers/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/markdown_it/helpers/__pycache__/parse_link_destination.cpython-314.pyc b/lib/markdown_it/helpers/__pycache__/parse_link_destination.cpython-314.pyc
new file mode 100644
index 0000000..2fac45f
Binary files /dev/null and b/lib/markdown_it/helpers/__pycache__/parse_link_destination.cpython-314.pyc differ
diff --git a/lib/markdown_it/helpers/__pycache__/parse_link_label.cpython-314.pyc b/lib/markdown_it/helpers/__pycache__/parse_link_label.cpython-314.pyc
new file mode 100644
index 0000000..e66d999
Binary files /dev/null and b/lib/markdown_it/helpers/__pycache__/parse_link_label.cpython-314.pyc differ
diff --git a/lib/markdown_it/helpers/__pycache__/parse_link_title.cpython-314.pyc b/lib/markdown_it/helpers/__pycache__/parse_link_title.cpython-314.pyc
new file mode 100644
index 0000000..a30014a
Binary files /dev/null and b/lib/markdown_it/helpers/__pycache__/parse_link_title.cpython-314.pyc differ
diff --git a/lib/markdown_it/helpers/parse_link_destination.py b/lib/markdown_it/helpers/parse_link_destination.py
new file mode 100644
index 0000000..c98323c
--- /dev/null
+++ b/lib/markdown_it/helpers/parse_link_destination.py
@@ -0,0 +1,83 @@
+"""
+Parse link destination
+"""
+
+from ..common.utils import charCodeAt, unescapeAll
+
+
+class _Result:
+ __slots__ = ("ok", "pos", "str")
+
+ def __init__(self) -> None:
+ self.ok = False
+ self.pos = 0
+ self.str = ""
+
+
+def parseLinkDestination(string: str, pos: int, maximum: int) -> _Result:
+ start = pos
+ result = _Result()
+
+ if charCodeAt(string, pos) == 0x3C: # /* < */
+ pos += 1
+ while pos < maximum:
+ code = charCodeAt(string, pos)
+ if code == 0x0A: # /* \n */)
+ return result
+ if code == 0x3C: # / * < * /
+ return result
+ if code == 0x3E: # /* > */) {
+ result.pos = pos + 1
+ result.str = unescapeAll(string[start + 1 : pos])
+ result.ok = True
+ return result
+
+ if code == 0x5C and pos + 1 < maximum: # \
+ pos += 2
+ continue
+
+ pos += 1
+
+ # no closing '>'
+ return result
+
+ # this should be ... } else { ... branch
+
+ level = 0
+ while pos < maximum:
+ code = charCodeAt(string, pos)
+
+ if code is None or code == 0x20:
+ break
+
+ # ascii control characters
+ if code < 0x20 or code == 0x7F:
+ break
+
+ if code == 0x5C and pos + 1 < maximum:
+ if charCodeAt(string, pos + 1) == 0x20:
+ break
+ pos += 2
+ continue
+
+ if code == 0x28: # /* ( */)
+ level += 1
+ if level > 32:
+ return result
+
+ if code == 0x29: # /* ) */)
+ if level == 0:
+ break
+ level -= 1
+
+ pos += 1
+
+ if start == pos:
+ return result
+ if level != 0:
+ return result
+
+ result.str = unescapeAll(string[start:pos])
+ result.pos = pos
+ result.ok = True
+ return result
diff --git a/lib/markdown_it/helpers/parse_link_label.py b/lib/markdown_it/helpers/parse_link_label.py
new file mode 100644
index 0000000..c80da5a
--- /dev/null
+++ b/lib/markdown_it/helpers/parse_link_label.py
@@ -0,0 +1,44 @@
+"""
+Parse link label
+
+this function assumes that first character ("[") already matches
+returns the end of the label
+
+"""
+
+from markdown_it.rules_inline import StateInline
+
+
+def parseLinkLabel(state: StateInline, start: int, disableNested: bool = False) -> int:
+ labelEnd = -1
+ oldPos = state.pos
+ found = False
+
+ state.pos = start + 1
+ level = 1
+
+ while state.pos < state.posMax:
+ marker = state.src[state.pos]
+ if marker == "]":
+ level -= 1
+ if level == 0:
+ found = True
+ break
+
+ prevPos = state.pos
+ state.md.inline.skipToken(state)
+ if marker == "[":
+ if prevPos == state.pos - 1:
+ # increase level if we find text `[`,
+ # which is not a part of any token
+ level += 1
+ elif disableNested:
+ state.pos = oldPos
+ return -1
+ if found:
+ labelEnd = state.pos
+
+ # restore old state
+ state.pos = oldPos
+
+ return labelEnd
diff --git a/lib/markdown_it/helpers/parse_link_title.py b/lib/markdown_it/helpers/parse_link_title.py
new file mode 100644
index 0000000..a38ff0d
--- /dev/null
+++ b/lib/markdown_it/helpers/parse_link_title.py
@@ -0,0 +1,75 @@
+"""Parse link title"""
+
+from ..common.utils import charCodeAt, unescapeAll
+
+
+class _State:
+ __slots__ = ("can_continue", "marker", "ok", "pos", "str")
+
+ def __init__(self) -> None:
+ self.ok = False
+ """if `true`, this is a valid link title"""
+ self.can_continue = False
+ """if `true`, this link can be continued on the next line"""
+ self.pos = 0
+ """if `ok`, it's the position of the first character after the closing marker"""
+ self.str = ""
+ """if `ok`, it's the unescaped title"""
+ self.marker = 0
+ """expected closing marker character code"""
+
+ def __str__(self) -> str:
+ return self.str
+
+
+def parseLinkTitle(
+ string: str, start: int, maximum: int, prev_state: _State | None = None
+) -> _State:
+ """Parse link title within `str` in [start, max] range,
+ or continue previous parsing if `prev_state` is defined (equal to result of last execution).
+ """
+ pos = start
+ state = _State()
+
+ if prev_state is not None:
+ # this is a continuation of a previous parseLinkTitle call on the next line,
+ # used in reference links only
+ state.str = prev_state.str
+ state.marker = prev_state.marker
+ else:
+ if pos >= maximum:
+ return state
+
+ marker = charCodeAt(string, pos)
+
+ # /* " */ /* ' */ /* ( */
+ if marker != 0x22 and marker != 0x27 and marker != 0x28:
+ return state
+
+ start += 1
+ pos += 1
+
+ # if opening marker is "(", switch it to closing marker ")"
+ if marker == 0x28:
+ marker = 0x29
+
+ state.marker = marker
+
+ while pos < maximum:
+ code = charCodeAt(string, pos)
+ if code == state.marker:
+ state.pos = pos + 1
+ state.str += unescapeAll(string[start:pos])
+ state.ok = True
+ return state
+ elif code == 0x28 and state.marker == 0x29: # /* ( */ /* ) */
+ return state
+ elif code == 0x5C and pos + 1 < maximum: # /* \ */
+ pos += 1
+
+ pos += 1
+
+ # no closing marker found, but this link title may continue on the next line (for references)
+ state.can_continue = True
+ state.str += unescapeAll(string[start:pos])
+ return state
diff --git a/lib/markdown_it/main.py b/lib/markdown_it/main.py
new file mode 100644
index 0000000..bf9fd18
--- /dev/null
+++ b/lib/markdown_it/main.py
@@ -0,0 +1,350 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Generator, Iterable, Mapping, MutableMapping
+from contextlib import contextmanager
+from typing import Any, Literal, overload
+
+from . import helpers, presets
+from .common import normalize_url, utils
+from .parser_block import ParserBlock
+from .parser_core import ParserCore
+from .parser_inline import ParserInline
+from .renderer import RendererHTML, RendererProtocol
+from .rules_core.state_core import StateCore
+from .token import Token
+from .utils import EnvType, OptionsDict, OptionsType, PresetType
+
+try:
+ import linkify_it
+except ModuleNotFoundError:
+ linkify_it = None
+
+
+_PRESETS: dict[str, PresetType] = {
+ "default": presets.default.make(),
+ "js-default": presets.js_default.make(),
+ "zero": presets.zero.make(),
+ "commonmark": presets.commonmark.make(),
+ "gfm-like": presets.gfm_like.make(),
+}
+
+
+class MarkdownIt:
+ def __init__(
+ self,
+ config: str | PresetType = "commonmark",
+ options_update: Mapping[str, Any] | None = None,
+ *,
+ renderer_cls: Callable[[MarkdownIt], RendererProtocol] = RendererHTML,
+ ):
+ """Main parser class
+
+ :param config: name of configuration to load or a pre-defined dictionary
+ :param options_update: dictionary that will be merged into ``config["options"]``
+ :param renderer_cls: the class to load as the renderer:
+ ``self.renderer = renderer_cls(self)
+ """
+ # add modules
+ self.utils = utils
+ self.helpers = helpers
+
+ # initialise classes
+ self.inline = ParserInline()
+ self.block = ParserBlock()
+ self.core = ParserCore()
+ self.renderer = renderer_cls(self)
+ self.linkify = linkify_it.LinkifyIt() if linkify_it else None
+
+ # set the configuration
+ if options_update and not isinstance(options_update, Mapping):
+ # catch signature change where renderer_cls was not used as a key-word
+ raise TypeError(
+ f"options_update should be a mapping: {options_update}"
+ "\n(Perhaps you intended this to be the renderer_cls?)"
+ )
+ self.configure(config, options_update=options_update)
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__module__}.{self.__class__.__name__}()"
+
+ @overload
+ def __getitem__(self, name: Literal["inline"]) -> ParserInline: ...
+
+ @overload
+ def __getitem__(self, name: Literal["block"]) -> ParserBlock: ...
+
+ @overload
+ def __getitem__(self, name: Literal["core"]) -> ParserCore: ...
+
+ @overload
+ def __getitem__(self, name: Literal["renderer"]) -> RendererProtocol: ...
+
+ @overload
+ def __getitem__(self, name: str) -> Any: ...
+
+ def __getitem__(self, name: str) -> Any:
+ return {
+ "inline": self.inline,
+ "block": self.block,
+ "core": self.core,
+ "renderer": self.renderer,
+ }[name]
+
+ def set(self, options: OptionsType) -> None:
+ """Set parser options (in the same format as in constructor).
+ Probably, you will never need it, but you can change options after constructor call.
+
+ __Note:__ To achieve the best possible performance, don't modify a
+ `markdown-it` instance options on the fly. If you need multiple configurations
+ it's best to create multiple instances and initialize each with separate config.
+ """
+ self.options = OptionsDict(options)
+
+ def configure(
+ self, presets: str | PresetType, options_update: Mapping[str, Any] | None = None
+ ) -> MarkdownIt:
+ """Batch load of all options and component settings.
+ This is an internal method, and you probably will not need it.
+ But if you will - see available presets and data structure
+ [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)
+
+ We strongly recommend to use presets instead of direct config loads.
+ That will give better compatibility with next versions.
+ """
+ if isinstance(presets, str):
+ if presets not in _PRESETS:
+ raise KeyError(f"Wrong `markdown-it` preset '{presets}', check name")
+ config = _PRESETS[presets]
+ else:
+ config = presets
+
+ if not config:
+ raise ValueError("Wrong `markdown-it` config, can't be empty")
+
+ options = config.get("options", {}) or {}
+ if options_update:
+ options = {**options, **options_update} # type: ignore
+
+ self.set(options) # type: ignore
+
+ if "components" in config:
+ for name, component in config["components"].items():
+ rules = component.get("rules", None)
+ if rules:
+ self[name].ruler.enableOnly(rules)
+ rules2 = component.get("rules2", None)
+ if rules2:
+ self[name].ruler2.enableOnly(rules2)
+
+ return self
+
+ def get_all_rules(self) -> dict[str, list[str]]:
+ """Return the names of all active rules."""
+ rules = {
+ chain: self[chain].ruler.get_all_rules()
+ for chain in ["core", "block", "inline"]
+ }
+ rules["inline2"] = self.inline.ruler2.get_all_rules()
+ return rules
+
+ def get_active_rules(self) -> dict[str, list[str]]:
+ """Return the names of all active rules."""
+ rules = {
+ chain: self[chain].ruler.get_active_rules()
+ for chain in ["core", "block", "inline"]
+ }
+ rules["inline2"] = self.inline.ruler2.get_active_rules()
+ return rules
+
+ def enable(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> MarkdownIt:
+ """Enable list or rules. (chainable)
+
+ :param names: rule name or list of rule names to enable.
+ :param ignoreInvalid: set `true` to ignore errors when rule not found.
+
+ It will automatically find appropriate components,
+ containing rules with given names. If rule not found, and `ignoreInvalid`
+ not set - throws exception.
+
+ Example::
+
+ md = MarkdownIt().enable(['sub', 'sup']).disable('smartquotes')
+
+ """
+ result = []
+
+ if isinstance(names, str):
+ names = [names]
+
+ for chain in ["core", "block", "inline"]:
+ result.extend(self[chain].ruler.enable(names, True))
+ result.extend(self.inline.ruler2.enable(names, True))
+
+ missed = [name for name in names if name not in result]
+ if missed and not ignoreInvalid:
+ raise ValueError(f"MarkdownIt. Failed to enable unknown rule(s): {missed}")
+
+ return self
+
+ def disable(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> MarkdownIt:
+ """The same as [[MarkdownIt.enable]], but turn specified rules off. (chainable)
+
+ :param names: rule name or list of rule names to disable.
+ :param ignoreInvalid: set `true` to ignore errors when rule not found.
+
+ """
+ result = []
+
+ if isinstance(names, str):
+ names = [names]
+
+ for chain in ["core", "block", "inline"]:
+ result.extend(self[chain].ruler.disable(names, True))
+ result.extend(self.inline.ruler2.disable(names, True))
+
+ missed = [name for name in names if name not in result]
+ if missed and not ignoreInvalid:
+ raise ValueError(f"MarkdownIt. Failed to disable unknown rule(s): {missed}")
+ return self
+
+ @contextmanager
+ def reset_rules(self) -> Generator[None, None, None]:
+ """A context manager, that will reset the current enabled rules on exit."""
+ chain_rules = self.get_active_rules()
+ yield
+ for chain, rules in chain_rules.items():
+ if chain != "inline2":
+ self[chain].ruler.enableOnly(rules)
+ self.inline.ruler2.enableOnly(chain_rules["inline2"])
+
+ def add_render_rule(
+ self, name: str, function: Callable[..., Any], fmt: str = "html"
+ ) -> None:
+ """Add a rule for rendering a particular Token type.
+
+ Only applied when ``renderer.__output__ == fmt``
+ """
+ if self.renderer.__output__ == fmt:
+ self.renderer.rules[name] = function.__get__(self.renderer) # type: ignore
+
+ def use(
+ self, plugin: Callable[..., None], *params: Any, **options: Any
+ ) -> MarkdownIt:
+ """Load specified plugin with given params into current parser instance. (chainable)
+
+ It's just a sugar to call `plugin(md, params)` with curring.
+
+ Example::
+
+ def func(tokens, idx):
+ tokens[idx].content = tokens[idx].content.replace('foo', 'bar')
+ md = MarkdownIt().use(plugin, 'foo_replace', 'text', func)
+
+ """
+ plugin(self, *params, **options)
+ return self
+
+ def parse(self, src: str, env: EnvType | None = None) -> list[Token]:
+ """Parse the source string to a token stream
+
+ :param src: source string
+ :param env: environment sandbox
+
+ Parse input string and return list of block tokens (special token type
+ "inline" will contain list of inline tokens).
+
+ `env` is used to pass data between "distributed" rules and return additional
+ metadata like reference info, needed for the renderer. It also can be used to
+ inject data in specific cases. Usually, you will be ok to pass `{}`,
+ and then pass updated object to renderer.
+ """
+ env = {} if env is None else env
+ if not isinstance(env, MutableMapping):
+ raise TypeError(f"Input data should be a MutableMapping, not {type(env)}")
+ if not isinstance(src, str):
+ raise TypeError(f"Input data should be a string, not {type(src)}")
+ state = StateCore(src, self, env)
+ self.core.process(state)
+ return state.tokens
+
+ def render(self, src: str, env: EnvType | None = None) -> Any:
+ """Render markdown string into html. It does all magic for you :).
+
+ :param src: source string
+ :param env: environment sandbox
+ :returns: The output of the loaded renderer
+
+ `env` can be used to inject additional metadata (`{}` by default).
+ But you will not need it with high probability. See also comment
+ in [[MarkdownIt.parse]].
+ """
+ env = {} if env is None else env
+ return self.renderer.render(self.parse(src, env), self.options, env)
+
+ def parseInline(self, src: str, env: EnvType | None = None) -> list[Token]:
+ """The same as [[MarkdownIt.parse]] but skip all block rules.
+
+ :param src: source string
+ :param env: environment sandbox
+
+ It returns the
+ block tokens list with the single `inline` element, containing parsed inline
+ tokens in `children` property. Also updates `env` object.
+ """
+ env = {} if env is None else env
+ if not isinstance(env, MutableMapping):
+ raise TypeError(f"Input data should be an MutableMapping, not {type(env)}")
+ if not isinstance(src, str):
+ raise TypeError(f"Input data should be a string, not {type(src)}")
+ state = StateCore(src, self, env)
+ state.inlineMode = True
+ self.core.process(state)
+ return state.tokens
+
+ def renderInline(self, src: str, env: EnvType | None = None) -> Any:
+ """Similar to [[MarkdownIt.render]] but for single paragraph content.
+
+ :param src: source string
+ :param env: environment sandbox
+
+ Similar to [[MarkdownIt.render]] but for single paragraph content. Result
+        will NOT be wrapped into `<p>` tags.
+ """
+ env = {} if env is None else env
+ return self.renderer.render(self.parseInline(src, env), self.options, env)
+
+ # link methods
+
+ def validateLink(self, url: str) -> bool:
+ """Validate if the URL link is allowed in output.
+
+ This validator can prohibit more than really needed to prevent XSS.
+ It's a tradeoff to keep code simple and to be secure by default.
+
+ Note: the url should be normalized at this point, and existing entities decoded.
+ """
+ return normalize_url.validateLink(url)
+
+ def normalizeLink(self, url: str) -> str:
+ """Normalize destination URLs in links
+
+ ::
+
+ [label]: destination 'title'
+ ^^^^^^^^^^^
+ """
+ return normalize_url.normalizeLink(url)
+
+ def normalizeLinkText(self, link: str) -> str:
+ """Normalize autolink content
+
+ ::
+
+            <destination>
+ ~~~~~~~~~~~
+ """
+ return normalize_url.normalizeLinkText(link)
diff --git a/lib/markdown_it/parser_block.py b/lib/markdown_it/parser_block.py
new file mode 100644
index 0000000..50a7184
--- /dev/null
+++ b/lib/markdown_it/parser_block.py
@@ -0,0 +1,113 @@
+"""Block-level tokenizer."""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+import logging
+from typing import TYPE_CHECKING
+
+from . import rules_block
+from .ruler import Ruler
+from .rules_block.state_block import StateBlock
+from .token import Token
+from .utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it import MarkdownIt
+
+LOGGER = logging.getLogger(__name__)
+
+
+RuleFuncBlockType = Callable[[StateBlock, int, int, bool], bool]
+"""(state: StateBlock, startLine: int, endLine: int, silent: bool) -> matched: bool)
+
+`silent` disables token generation, useful for lookahead.
+"""
+
+_rules: list[tuple[str, RuleFuncBlockType, list[str]]] = [
+ # First 2 params - rule name & source. Secondary array - list of rules,
+ # which can be terminated by this one.
+ ("table", rules_block.table, ["paragraph", "reference"]),
+ ("code", rules_block.code, []),
+ ("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]),
+ (
+ "blockquote",
+ rules_block.blockquote,
+ ["paragraph", "reference", "blockquote", "list"],
+ ),
+ ("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]),
+ ("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]),
+ ("reference", rules_block.reference, []),
+ ("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]),
+ ("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]),
+ ("lheading", rules_block.lheading, []),
+ ("paragraph", rules_block.paragraph, []),
+]
+
+
+class ParserBlock:
+ """
+ ParserBlock#ruler -> Ruler
+
+ [[Ruler]] instance. Keep configuration of block rules.
+ """
+
+ def __init__(self) -> None:
+ self.ruler = Ruler[RuleFuncBlockType]()
+ for name, rule, alt in _rules:
+ self.ruler.push(name, rule, {"alt": alt})
+
+ def tokenize(self, state: StateBlock, startLine: int, endLine: int) -> None:
+ """Generate tokens for input range."""
+ rules = self.ruler.getRules("")
+ line = startLine
+ maxNesting = state.md.options.maxNesting
+ hasEmptyLines = False
+
+ while line < endLine:
+ state.line = line = state.skipEmptyLines(line)
+ if line >= endLine:
+ break
+ if state.sCount[line] < state.blkIndent:
+ # Termination condition for nested calls.
+ # Nested calls currently used for blockquotes & lists
+ break
+ if state.level >= maxNesting:
+ # If nesting level exceeded - skip tail to the end.
+ # That's not ordinary situation and we should not care about content.
+ state.line = endLine
+ break
+
+ # Try all possible rules.
+ # On success, rule should:
+ # - update `state.line`
+ # - update `state.tokens`
+ # - return True
+ for rule in rules:
+ if rule(state, line, endLine, False):
+ break
+
+ # set state.tight if we had an empty line before current tag
+ # i.e. latest empty line should not count
+ state.tight = not hasEmptyLines
+
+ line = state.line
+
+ # paragraph might "eat" one newline after it in nested lists
+ if (line - 1) < endLine and state.isEmpty(line - 1):
+ hasEmptyLines = True
+
+ if line < endLine and state.isEmpty(line):
+ hasEmptyLines = True
+ line += 1
+ state.line = line
+
+ def parse(
+ self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token]
+ ) -> list[Token] | None:
+ """Process input string and push block tokens into `outTokens`."""
+ if not src:
+ return None
+ state = StateBlock(src, md, env, outTokens)
+ self.tokenize(state, state.line, state.lineMax)
+ return state.tokens
diff --git a/lib/markdown_it/parser_core.py b/lib/markdown_it/parser_core.py
new file mode 100644
index 0000000..8f5b921
--- /dev/null
+++ b/lib/markdown_it/parser_core.py
@@ -0,0 +1,46 @@
+"""
+* class Core
+*
+* Top-level rules executor. Glues block/inline parsers and does intermediate
+* transformations.
+"""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+
+from .ruler import Ruler
+from .rules_core import (
+ block,
+ inline,
+ linkify,
+ normalize,
+ replace,
+ smartquotes,
+ text_join,
+)
+from .rules_core.state_core import StateCore
+
+RuleFuncCoreType = Callable[[StateCore], None]
+
+_rules: list[tuple[str, RuleFuncCoreType]] = [
+ ("normalize", normalize),
+ ("block", block),
+ ("inline", inline),
+ ("linkify", linkify),
+ ("replacements", replace),
+ ("smartquotes", smartquotes),
+ ("text_join", text_join),
+]
+
+
+class ParserCore:
+ def __init__(self) -> None:
+ self.ruler = Ruler[RuleFuncCoreType]()
+ for name, rule in _rules:
+ self.ruler.push(name, rule)
+
+ def process(self, state: StateCore) -> None:
+ """Executes core chain rules."""
+ for rule in self.ruler.getRules(""):
+ rule(state)
diff --git a/lib/markdown_it/parser_inline.py b/lib/markdown_it/parser_inline.py
new file mode 100644
index 0000000..26ec2e6
--- /dev/null
+++ b/lib/markdown_it/parser_inline.py
@@ -0,0 +1,148 @@
+"""Tokenizes paragraph content."""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+from typing import TYPE_CHECKING
+
+from . import rules_inline
+from .ruler import Ruler
+from .rules_inline.state_inline import StateInline
+from .token import Token
+from .utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it import MarkdownIt
+
+
+# Parser rules
+RuleFuncInlineType = Callable[[StateInline, bool], bool]
+"""(state: StateInline, silent: bool) -> matched: bool)
+
+`silent` disables token generation, useful for lookahead.
+"""
+_rules: list[tuple[str, RuleFuncInlineType]] = [
+ ("text", rules_inline.text),
+ ("linkify", rules_inline.linkify),
+ ("newline", rules_inline.newline),
+ ("escape", rules_inline.escape),
+ ("backticks", rules_inline.backtick),
+ ("strikethrough", rules_inline.strikethrough.tokenize),
+ ("emphasis", rules_inline.emphasis.tokenize),
+ ("link", rules_inline.link),
+ ("image", rules_inline.image),
+ ("autolink", rules_inline.autolink),
+ ("html_inline", rules_inline.html_inline),
+ ("entity", rules_inline.entity),
+]
+
+# Note `rule2` ruleset was created specifically for emphasis/strikethrough
+# post-processing and may be changed in the future.
+#
+# Don't use this for anything except pairs (plugins working with `balance_pairs`).
+#
+RuleFuncInline2Type = Callable[[StateInline], None]
+_rules2: list[tuple[str, RuleFuncInline2Type]] = [
+ ("balance_pairs", rules_inline.link_pairs),
+ ("strikethrough", rules_inline.strikethrough.postProcess),
+ ("emphasis", rules_inline.emphasis.postProcess),
+ # rules for pairs separate '**' into its own text tokens, which may be left unused,
+ # rule below merges unused segments back with the rest of the text
+ ("fragments_join", rules_inline.fragments_join),
+]
+
+
+class ParserInline:
+ def __init__(self) -> None:
+ self.ruler = Ruler[RuleFuncInlineType]()
+ for name, rule in _rules:
+ self.ruler.push(name, rule)
+ # Second ruler used for post-processing (e.g. in emphasis-like rules)
+ self.ruler2 = Ruler[RuleFuncInline2Type]()
+ for name, rule2 in _rules2:
+ self.ruler2.push(name, rule2)
+
+ def skipToken(self, state: StateInline) -> None:
+ """Skip single token by running all rules in validation mode;
+ returns `True` if any rule reported success
+ """
+ ok = False
+ pos = state.pos
+ rules = self.ruler.getRules("")
+ maxNesting = state.md.options["maxNesting"]
+ cache = state.cache
+
+ if pos in cache:
+ state.pos = cache[pos]
+ return
+
+ if state.level < maxNesting:
+ for rule in rules:
+ # Increment state.level and decrement it later to limit recursion.
+ # It's harmless to do here, because no tokens are created.
+ # But ideally, we'd need a separate private state variable for this purpose.
+ state.level += 1
+ ok = rule(state, True)
+ state.level -= 1
+ if ok:
+ break
+ else:
+ # Too much nesting, just skip until the end of the paragraph.
+ #
+ # NOTE: this will cause links to behave incorrectly in the following case,
+ # when an amount of `[` is exactly equal to `maxNesting + 1`:
+ #
+ # [[[[[[[[[[[[[[[[[[[[[foo]()
+ #
+ # TODO: remove this workaround when CM standard will allow nested links
+ # (we can replace it by preventing links from being parsed in
+ # validation mode)
+ #
+ state.pos = state.posMax
+
+ if not ok:
+ state.pos += 1
+ cache[pos] = state.pos
+
+ def tokenize(self, state: StateInline) -> None:
+ """Generate tokens for input range."""
+ ok = False
+ rules = self.ruler.getRules("")
+ end = state.posMax
+ maxNesting = state.md.options["maxNesting"]
+
+ while state.pos < end:
+ # Try all possible rules.
+ # On success, rule should:
+ #
+ # - update `state.pos`
+ # - update `state.tokens`
+ # - return true
+
+ if state.level < maxNesting:
+ for rule in rules:
+ ok = rule(state, False)
+ if ok:
+ break
+
+ if ok:
+ if state.pos >= end:
+ break
+ continue
+
+ state.pending += state.src[state.pos]
+ state.pos += 1
+
+ if state.pending:
+ state.pushPending()
+
+ def parse(
+ self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token]
+ ) -> list[Token]:
+ """Process input string and push inline tokens into `tokens`"""
+ state = StateInline(src, md, env, tokens)
+ self.tokenize(state)
+ rules2 = self.ruler2.getRules("")
+ for rule in rules2:
+ rule(state)
+ return state.tokens
diff --git a/lib/markdown_it/port.yaml b/lib/markdown_it/port.yaml
new file mode 100644
index 0000000..ce2dde9
--- /dev/null
+++ b/lib/markdown_it/port.yaml
@@ -0,0 +1,48 @@
+- package: markdown-it/markdown-it
+ version: 14.1.0
+ commit: 0fe7ccb4b7f30236fb05f623be6924961d296d3d
+ date: Mar 19, 2024
+ notes:
+ - Rename variables that use python built-in names, e.g.
+ - `max` -> `maximum`
+ - `len` -> `length`
+ - `str` -> `string`
+ - |
+ Convert JS `for` loops to `while` loops
+ this is generally the main difference between the codes,
+    because in python you can't do e.g. `for {i=1;i<x;i++} {}`
+    def make() -> PresetType:
+ config = commonmark.make()
+ config["components"]["core"]["rules"].append("linkify")
+ config["components"]["block"]["rules"].append("table")
+ config["components"]["inline"]["rules"].extend(["strikethrough", "linkify"])
+ config["components"]["inline"]["rules2"].append("strikethrough")
+ config["options"]["linkify"] = True
+ config["options"]["html"] = True
+ return config
diff --git a/lib/markdown_it/presets/__pycache__/__init__.cpython-314.pyc b/lib/markdown_it/presets/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..e46f728
Binary files /dev/null and b/lib/markdown_it/presets/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/markdown_it/presets/__pycache__/commonmark.cpython-314.pyc b/lib/markdown_it/presets/__pycache__/commonmark.cpython-314.pyc
new file mode 100644
index 0000000..80650fa
Binary files /dev/null and b/lib/markdown_it/presets/__pycache__/commonmark.cpython-314.pyc differ
diff --git a/lib/markdown_it/presets/__pycache__/default.cpython-314.pyc b/lib/markdown_it/presets/__pycache__/default.cpython-314.pyc
new file mode 100644
index 0000000..0d4f15b
Binary files /dev/null and b/lib/markdown_it/presets/__pycache__/default.cpython-314.pyc differ
diff --git a/lib/markdown_it/presets/__pycache__/zero.cpython-314.pyc b/lib/markdown_it/presets/__pycache__/zero.cpython-314.pyc
new file mode 100644
index 0000000..e6b0aab
Binary files /dev/null and b/lib/markdown_it/presets/__pycache__/zero.cpython-314.pyc differ
diff --git a/lib/markdown_it/presets/commonmark.py b/lib/markdown_it/presets/commonmark.py
new file mode 100644
index 0000000..ed0de0f
--- /dev/null
+++ b/lib/markdown_it/presets/commonmark.py
@@ -0,0 +1,75 @@
+"""Commonmark default options.
+
+This differs to presets.default,
+primarily in that it allows HTML and does not enable components:
+
+- block: table
+- inline: strikethrough
+"""
+
+from ..utils import PresetType
+
+
+def make() -> PresetType:
+ return {
+ "options": {
+ "maxNesting": 20, # Internal protection, recursion limit
+ "html": True, # Enable HTML tags in source,
+ # this is just a shorthand for .enable(["html_inline", "html_block"])
+ # used by the linkify rule:
+ "linkify": False, # autoconvert URL-like texts to links
+ # used by the replacements and smartquotes rules
+ # Enable some language-neutral replacements + quotes beautification
+ "typographer": False,
+ # used by the smartquotes rule:
+ # Double + single quotes replacement pairs, when typographer enabled,
+ # and smartquotes on. Could be either a String or an Array.
+ #
+ # For example, you can use '«»„“' for Russian, '„“‚‘' for German,
+ # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
+ "quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */
+ # Renderer specific; these options are used directly in the HTML renderer
+ "xhtmlOut": True, # Use '/' to close single tags (
)
+ "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks
+ # Highlighter function. Should return escaped HTML,
+ # or '' if the source string is not changed and should be escaped externally.
+ # If result starts with PresetType:
+ return {
+ "options": {
+ "maxNesting": 100, # Internal protection, recursion limit
+ "html": False, # Enable HTML tags in source
+ # this is just a shorthand for .disable(["html_inline", "html_block"])
+ # used by the linkify rule:
+ "linkify": False, # autoconvert URL-like texts to links
+ # used by the replacements and smartquotes rules:
+ # Enable some language-neutral replacements + quotes beautification
+ "typographer": False,
+ # used by the smartquotes rule:
+ # Double + single quotes replacement pairs, when typographer enabled,
+ # and smartquotes on. Could be either a String or an Array.
+ # For example, you can use '«»„“' for Russian, '„“‚‘' for German,
+ # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
+ "quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */
+ # Renderer specific; these options are used directly in the HTML renderer
+ "xhtmlOut": False, # Use '/' to close single tags (
)
+ "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks
+ # Highlighter function. Should return escaped HTML,
+ # or '' if the source string is not changed and should be escaped externally.
+ # If result starts with PresetType:
+ return {
+ "options": {
+ "maxNesting": 20, # Internal protection, recursion limit
+ "html": False, # Enable HTML tags in source
+ # this is just a shorthand for .disable(["html_inline", "html_block"])
+ # used by the linkify rule:
+ "linkify": False, # autoconvert URL-like texts to links
+ # used by the replacements and smartquotes rules:
+ # Enable some language-neutral replacements + quotes beautification
+ "typographer": False,
+ # used by the smartquotes rule:
+ # Double + single quotes replacement pairs, when typographer enabled,
+ # and smartquotes on. Could be either a String or an Array.
+ # For example, you can use '«»„“' for Russian, '„“‚‘' for German,
+ # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
+ "quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */
+ # Renderer specific; these options are used directly in the HTML renderer
+ "xhtmlOut": False, # Use '/' to close single tags (
)
+ "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks
+ # Highlighter function. Should return escaped HTML,
+ # or '' if the source string is not changed and should be escaped externally.
+ # If result starts with Any: ...
+
+
+class RendererHTML(RendererProtocol):
+ """Contains render rules for tokens. Can be updated and extended.
+
+ Example:
+
+ Each rule is called as independent static function with fixed signature:
+
+ ::
+
+ class Renderer:
+ def token_type_name(self, tokens, idx, options, env) {
+ # ...
+ return renderedHTML
+
+ ::
+
+ class CustomRenderer(RendererHTML):
+ def strong_open(self, tokens, idx, options, env):
+ return ''
+ def strong_close(self, tokens, idx, options, env):
+ return ''
+
+ md = MarkdownIt(renderer_cls=CustomRenderer)
+
+ result = md.render(...)
+
+ See https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js
+ for more details and examples.
+ """
+
+ __output__ = "html"
+
+ def __init__(self, parser: Any = None):
+ self.rules = {
+ k: v
+ for k, v in inspect.getmembers(self, predicate=inspect.ismethod)
+ if not (k.startswith("render") or k.startswith("_"))
+ }
+
+ def render(
+ self, tokens: Sequence[Token], options: OptionsDict, env: EnvType
+ ) -> str:
+ """Takes token stream and generates HTML.
+
+ :param tokens: list of block tokens to render
+ :param options: params of parser instance
+ :param env: additional data from parsed input
+
+ """
+ result = ""
+
+ for i, token in enumerate(tokens):
+ if token.type == "inline":
+ if token.children:
+ result += self.renderInline(token.children, options, env)
+ elif token.type in self.rules:
+ result += self.rules[token.type](tokens, i, options, env)
+ else:
+ result += self.renderToken(tokens, i, options, env)
+
+ return result
+
+ def renderInline(
+ self, tokens: Sequence[Token], options: OptionsDict, env: EnvType
+ ) -> str:
+ """The same as ``render``, but for single token of `inline` type.
+
+ :param tokens: list of block tokens to render
+ :param options: params of parser instance
+ :param env: additional data from parsed input (references, for example)
+ """
+ result = ""
+
+ for i, token in enumerate(tokens):
+ if token.type in self.rules:
+ result += self.rules[token.type](tokens, i, options, env)
+ else:
+ result += self.renderToken(tokens, i, options, env)
+
+ return result
+
+ def renderToken(
+ self,
+ tokens: Sequence[Token],
+ idx: int,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ """Default token renderer.
+
+ Can be overridden by custom function
+
+ :param idx: token index to render
+ :param options: params of parser instance
+ """
+ result = ""
+ needLf = False
+ token = tokens[idx]
+
+ # Tight list paragraphs
+ if token.hidden:
+ return ""
+
+ # Insert a newline between hidden paragraph and subsequent opening
+ # block-level tag.
+ #
+ # For example, here we should insert a newline before blockquote:
+ # - a
+ # >
+ #
+ if token.block and token.nesting != -1 and idx and tokens[idx - 1].hidden:
+ result += "\n"
+
+ # Add token name, e.g. `
`.
+ #
+ needLf = False
+
+ result += ">\n" if needLf else ">"
+
+ return result
+
+ @staticmethod
+ def renderAttrs(token: Token) -> str:
+ """Render token attributes to string."""
+ result = ""
+
+ for key, value in token.attrItems():
+ result += " " + escapeHtml(key) + '="' + escapeHtml(str(value)) + '"'
+
+ return result
+
+ def renderInlineAsText(
+ self,
+ tokens: Sequence[Token] | None,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ """Special kludge for image `alt` attributes to conform CommonMark spec.
+
+ Don't try to use it! Spec requires to show `alt` content with stripped markup,
+ instead of simple escaping.
+
+ :param tokens: list of block tokens to render
+ :param options: params of parser instance
+ :param env: additional data from parsed input
+ """
+ result = ""
+
+ for token in tokens or []:
+ if token.type == "text":
+ result += token.content
+ elif token.type == "image":
+ if token.children:
+ result += self.renderInlineAsText(token.children, options, env)
+ elif token.type == "softbreak":
+ result += "\n"
+
+ return result
+
+ ###################################################
+
+ def code_inline(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ token = tokens[idx]
+ return (
+ ""
+ + escapeHtml(tokens[idx].content)
+ + ""
+ )
+
+ def code_block(
+ self,
+ tokens: Sequence[Token],
+ idx: int,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ token = tokens[idx]
+
+ return (
+ ""
+ + escapeHtml(tokens[idx].content)
+ + "
\n"
+ )
+
+ def fence(
+ self,
+ tokens: Sequence[Token],
+ idx: int,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ token = tokens[idx]
+ info = unescapeAll(token.info).strip() if token.info else ""
+ langName = ""
+ langAttrs = ""
+
+ if info:
+ arr = info.split(maxsplit=1)
+ langName = arr[0]
+ if len(arr) == 2:
+ langAttrs = arr[1]
+
+ if options.highlight:
+ highlighted = options.highlight(
+ token.content, langName, langAttrs
+ ) or escapeHtml(token.content)
+ else:
+ highlighted = escapeHtml(token.content)
+
+ if highlighted.startswith(""
+ + highlighted
+ + "
\n"
+ )
+
+ return (
+ ""
+ + highlighted
+ + "
\n"
+ )
+
+ def image(
+ self,
+ tokens: Sequence[Token],
+ idx: int,
+ options: OptionsDict,
+ env: EnvType,
+ ) -> str:
+ token = tokens[idx]
+
+ # "alt" attr MUST be set, even if empty. Because it's mandatory and
+ # should be placed on proper position for tests.
+ if token.children:
+ token.attrSet("alt", self.renderInlineAsText(token.children, options, env))
+ else:
+ token.attrSet("alt", "")
+
+ return self.renderToken(tokens, idx, options, env)
+
+ def hardbreak(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return "
\n" if options.xhtmlOut else "
\n"
+
+ def softbreak(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return (
+ ("
\n" if options.xhtmlOut else "
\n") if options.breaks else "\n"
+ )
+
+ def text(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return escapeHtml(tokens[idx].content)
+
+ def html_block(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return tokens[idx].content
+
+ def html_inline(
+ self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
+ ) -> str:
+ return tokens[idx].content
diff --git a/lib/markdown_it/ruler.py b/lib/markdown_it/ruler.py
new file mode 100644
index 0000000..91ab580
--- /dev/null
+++ b/lib/markdown_it/ruler.py
@@ -0,0 +1,275 @@
+"""
+class Ruler
+
+Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and
+[[MarkdownIt#inline]] to manage sequences of functions (rules):
+
+- keep rules in defined order
+- assign the name to each rule
+- enable/disable rules
+- add/replace rules
+- allow assign rules to additional named chains (in the same)
+- caching lists of active rules
+
+You will not need to use this class directly until you write plugins. For simple
+rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and
+[[MarkdownIt.use]].
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Generic, TypedDict, TypeVar
+import warnings
+
+from .utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it import MarkdownIt
+
+
+class StateBase:
+ def __init__(self, src: str, md: MarkdownIt, env: EnvType):
+ self.src = src
+ self.env = env
+ self.md = md
+
+ @property
+ def src(self) -> str:
+ return self._src
+
+ @src.setter
+ def src(self, value: str) -> None:
+ self._src = value
+ self._srcCharCode: tuple[int, ...] | None = None
+
+ @property
+ def srcCharCode(self) -> tuple[int, ...]:
+ warnings.warn(
+ "StateBase.srcCharCode is deprecated. Use StateBase.src instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if self._srcCharCode is None:
+ self._srcCharCode = tuple(ord(c) for c in self._src)
+ return self._srcCharCode
+
+
+class RuleOptionsType(TypedDict, total=False):
+ alt: list[str]
+
+
+RuleFuncTv = TypeVar("RuleFuncTv")
+"""A rule function, whose signature is dependent on the state type."""
+
+
+@dataclass(slots=True)
+class Rule(Generic[RuleFuncTv]):
+ name: str
+ enabled: bool
+ fn: RuleFuncTv = field(repr=False)
+ alt: list[str]
+
+
+class Ruler(Generic[RuleFuncTv]):
+ def __init__(self) -> None:
+ # List of added rules.
+ self.__rules__: list[Rule[RuleFuncTv]] = []
+ # Cached rule chains.
+ # First level - chain name, '' for default.
+ # Second level - digital anchor for fast filtering by charcodes.
+ self.__cache__: dict[str, list[RuleFuncTv]] | None = None
+
+ def __find__(self, name: str) -> int:
+ """Find rule index by name"""
+ for i, rule in enumerate(self.__rules__):
+ if rule.name == name:
+ return i
+ return -1
+
+ def __compile__(self) -> None:
+ """Build rules lookup cache"""
+ chains = {""}
+ # collect unique names
+ for rule in self.__rules__:
+ if not rule.enabled:
+ continue
+ for name in rule.alt:
+ chains.add(name)
+ self.__cache__ = {}
+ for chain in chains:
+ self.__cache__[chain] = []
+ for rule in self.__rules__:
+ if not rule.enabled:
+ continue
+ if chain and (chain not in rule.alt):
+ continue
+ self.__cache__[chain].append(rule.fn)
+
+ def at(
+ self, ruleName: str, fn: RuleFuncTv, options: RuleOptionsType | None = None
+ ) -> None:
+ """Replace rule by name with new function & options.
+
+ :param ruleName: rule name to replace.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+ :raises: KeyError if name not found
+ """
+ index = self.__find__(ruleName)
+ options = options or {}
+ if index == -1:
+ raise KeyError(f"Parser rule not found: {ruleName}")
+ self.__rules__[index].fn = fn
+ self.__rules__[index].alt = options.get("alt", [])
+ self.__cache__ = None
+
+ def before(
+ self,
+ beforeName: str,
+ ruleName: str,
+ fn: RuleFuncTv,
+ options: RuleOptionsType | None = None,
+ ) -> None:
+ """Add new rule to chain before one with given name.
+
+ :param beforeName: new rule will be added before this one.
+ :param ruleName: new rule will be added before this one.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+ :raises: KeyError if name not found
+ """
+ index = self.__find__(beforeName)
+ options = options or {}
+ if index == -1:
+ raise KeyError(f"Parser rule not found: {beforeName}")
+ self.__rules__.insert(
+ index, Rule[RuleFuncTv](ruleName, True, fn, options.get("alt", []))
+ )
+ self.__cache__ = None
+
+ def after(
+ self,
+ afterName: str,
+ ruleName: str,
+ fn: RuleFuncTv,
+ options: RuleOptionsType | None = None,
+ ) -> None:
+ """Add new rule to chain after one with given name.
+
+ :param afterName: new rule will be added after this one.
+ :param ruleName: new rule will be added after this one.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+ :raises: KeyError if name not found
+ """
+ index = self.__find__(afterName)
+ options = options or {}
+ if index == -1:
+ raise KeyError(f"Parser rule not found: {afterName}")
+ self.__rules__.insert(
+ index + 1, Rule[RuleFuncTv](ruleName, True, fn, options.get("alt", []))
+ )
+ self.__cache__ = None
+
+ def push(
+ self, ruleName: str, fn: RuleFuncTv, options: RuleOptionsType | None = None
+ ) -> None:
+ """Push new rule to the end of chain.
+
+ :param ruleName: new rule will be added to the end of chain.
+ :param fn: new rule function.
+ :param options: new rule options (not mandatory).
+
+ """
+ self.__rules__.append(
+ Rule[RuleFuncTv](ruleName, True, fn, (options or {}).get("alt", []))
+ )
+ self.__cache__ = None
+
+ def enable(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> list[str]:
+ """Enable rules with given names.
+
+ :param names: name or list of rule names to enable.
+ :param ignoreInvalid: ignore errors when rule not found
+ :raises: KeyError if name not found and not ignoreInvalid
+ :return: list of found rule names
+ """
+ if isinstance(names, str):
+ names = [names]
+ result: list[str] = []
+ for name in names:
+ idx = self.__find__(name)
+ if (idx < 0) and ignoreInvalid:
+ continue
+ if (idx < 0) and not ignoreInvalid:
+ raise KeyError(f"Rules manager: invalid rule name {name}")
+ self.__rules__[idx].enabled = True
+ result.append(name)
+ self.__cache__ = None
+ return result
+
+ def enableOnly(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> list[str]:
+ """Enable rules with given names, and disable everything else.
+
+ :param names: name or list of rule names to enable.
+ :param ignoreInvalid: ignore errors when rule not found
+ :raises: KeyError if name not found and not ignoreInvalid
+ :return: list of found rule names
+ """
+ if isinstance(names, str):
+ names = [names]
+ for rule in self.__rules__:
+ rule.enabled = False
+ return self.enable(names, ignoreInvalid)
+
+ def disable(
+ self, names: str | Iterable[str], ignoreInvalid: bool = False
+ ) -> list[str]:
+ """Disable rules with given names.
+
+ :param names: name or list of rule names to disable.
+ :param ignoreInvalid: ignore errors when rule not found
+ :raises: KeyError if name not found and not ignoreInvalid
+ :return: list of found rule names
+ """
+ if isinstance(names, str):
+ names = [names]
+ result = []
+ for name in names:
+ idx = self.__find__(name)
+ if (idx < 0) and ignoreInvalid:
+ continue
+ if (idx < 0) and not ignoreInvalid:
+ raise KeyError(f"Rules manager: invalid rule name {name}")
+ self.__rules__[idx].enabled = False
+ result.append(name)
+ self.__cache__ = None
+ return result
+
+ def getRules(self, chainName: str = "") -> list[RuleFuncTv]:
+ """Return array of active functions (rules) for given chain name.
+ It analyzes the rules configuration, compiles caches if they do not exist, and returns the result.
+
+ Default chain name is `''` (empty string). It can't be skipped.
+ That's done intentionally, to keep signature monomorphic for high speed.
+
+ """
+ if self.__cache__ is None:
+ self.__compile__()
+ assert self.__cache__ is not None
+ # Chain can be empty, if rules disabled. But we still have to return Array.
+ return self.__cache__.get(chainName, []) or []
+
+ def get_all_rules(self) -> list[str]:
+ """Return all available rule names."""
+ return [r.name for r in self.__rules__]
+
+ def get_active_rules(self) -> list[str]:
+ """Return the active rule names."""
+ return [r.name for r in self.__rules__ if r.enabled]
diff --git a/lib/markdown_it/rules_block/__init__.py b/lib/markdown_it/rules_block/__init__.py
new file mode 100644
index 0000000..517da23
--- /dev/null
+++ b/lib/markdown_it/rules_block/__init__.py
@@ -0,0 +1,27 @@
+__all__ = (
+ "StateBlock",
+ "blockquote",
+ "code",
+ "fence",
+ "heading",
+ "hr",
+ "html_block",
+ "lheading",
+ "list_block",
+ "paragraph",
+ "reference",
+ "table",
+)
+
+from .blockquote import blockquote
+from .code import code
+from .fence import fence
+from .heading import heading
+from .hr import hr
+from .html_block import html_block
+from .lheading import lheading
+from .list import list_block
+from .paragraph import paragraph
+from .reference import reference
+from .state_block import StateBlock
+from .table import table
diff --git a/lib/markdown_it/rules_block/__pycache__/__init__.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..ff71ac5
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/blockquote.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/blockquote.cpython-314.pyc
new file mode 100644
index 0000000..69140c2
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/blockquote.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/code.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/code.cpython-314.pyc
new file mode 100644
index 0000000..889656b
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/code.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/fence.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/fence.cpython-314.pyc
new file mode 100644
index 0000000..c3e9215
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/fence.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/heading.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/heading.cpython-314.pyc
new file mode 100644
index 0000000..12af1dd
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/heading.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/hr.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/hr.cpython-314.pyc
new file mode 100644
index 0000000..9203733
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/hr.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/html_block.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/html_block.cpython-314.pyc
new file mode 100644
index 0000000..f938621
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/html_block.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/lheading.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/lheading.cpython-314.pyc
new file mode 100644
index 0000000..86bd1e0
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/lheading.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/list.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/list.cpython-314.pyc
new file mode 100644
index 0000000..e9c03f7
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/list.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/paragraph.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/paragraph.cpython-314.pyc
new file mode 100644
index 0000000..aca8006
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/paragraph.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/reference.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/reference.cpython-314.pyc
new file mode 100644
index 0000000..05b5e88
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/reference.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/state_block.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/state_block.cpython-314.pyc
new file mode 100644
index 0000000..1b7ce84
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/state_block.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/__pycache__/table.cpython-314.pyc b/lib/markdown_it/rules_block/__pycache__/table.cpython-314.pyc
new file mode 100644
index 0000000..b72dd2a
Binary files /dev/null and b/lib/markdown_it/rules_block/__pycache__/table.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_block/blockquote.py b/lib/markdown_it/rules_block/blockquote.py
new file mode 100644
index 0000000..0c9081b
--- /dev/null
+++ b/lib/markdown_it/rules_block/blockquote.py
@@ -0,0 +1,299 @@
+# Block quotes
+from __future__ import annotations
+
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
+def blockquote(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
+ LOGGER.debug(
+ "entering blockquote: %s, %s, %s, %s", state, startLine, endLine, silent
+ )
+
+ oldLineMax = state.lineMax
+ pos = state.bMarks[startLine] + state.tShift[startLine]
+ max = state.eMarks[startLine]
+
+ if state.is_code_block(startLine):
+ return False
+
+ # check the block quote marker
+ try:
+ if state.src[pos] != ">":
+ return False
+ except IndexError:
+ return False
+ pos += 1
+
+ # we know that it's going to be a valid blockquote,
+ # so no point trying to find the end of it in silent mode
+ if silent:
+ return True
+
+ # set offset past spaces and ">"
+ initial = offset = state.sCount[startLine] + 1
+
+ try:
+ second_char: str | None = state.src[pos]
+ except IndexError:
+ second_char = None
+
+ # skip one optional space after '>'
+ if second_char == " ":
+ # ' > test '
+ # ^ -- position start of line here:
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ spaceAfterMarker = True
+ elif second_char == "\t":
+ spaceAfterMarker = True
+
+ if (state.bsCount[startLine] + offset) % 4 == 3:
+ # ' >\t test '
+ # ^ -- position start of line here (tab has width==1)
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ else:
+ # ' >\t test '
+ # ^ -- position start of line here + shift bsCount slightly
+ # to make extra space appear
+ adjustTab = True
+
+ else:
+ spaceAfterMarker = False
+
+ oldBMarks = [state.bMarks[startLine]]
+ state.bMarks[startLine] = pos
+
+ while pos < max:
+ ch = state.src[pos]
+
+ if isStrSpace(ch):
+ if ch == "\t":
+ offset += (
+ 4
+ - (offset + state.bsCount[startLine] + (1 if adjustTab else 0)) % 4
+ )
+ else:
+ offset += 1
+
+ else:
+ break
+
+ pos += 1
+
+ oldBSCount = [state.bsCount[startLine]]
+ state.bsCount[startLine] = (
+ state.sCount[startLine] + 1 + (1 if spaceAfterMarker else 0)
+ )
+
+ lastLineEmpty = pos >= max
+
+ oldSCount = [state.sCount[startLine]]
+ state.sCount[startLine] = offset - initial
+
+ oldTShift = [state.tShift[startLine]]
+ state.tShift[startLine] = pos - state.bMarks[startLine]
+
+ terminatorRules = state.md.block.ruler.getRules("blockquote")
+
+ oldParentType = state.parentType
+ state.parentType = "blockquote"
+
+ # Search the end of the block
+ #
+ # Block ends with either:
+ # 1. an empty line outside:
+ # ```
+ # > test
+ #
+ # ```
+ # 2. an empty line inside:
+ # ```
+ # >
+ # test
+ # ```
+ # 3. another tag:
+ # ```
+ # > test
+ # - - -
+ # ```
+
+ # for (nextLine = startLine + 1; nextLine < endLine; nextLine++) {
+ nextLine = startLine + 1
+ while nextLine < endLine:
+ # check if it's outdented, i.e. it's inside list item and indented
+ # less than said list item:
+ #
+ # ```
+ # 1. anything
+ # > current blockquote
+ # 2. checking this line
+ # ```
+ isOutdented = state.sCount[nextLine] < state.blkIndent
+
+ pos = state.bMarks[nextLine] + state.tShift[nextLine]
+ max = state.eMarks[nextLine]
+
+ if pos >= max:
+ # Case 1: line is not inside the blockquote, and this line is empty.
+ break
+
+ evaluatesTrue = state.src[pos] == ">" and not isOutdented
+ pos += 1
+ if evaluatesTrue:
+ # This line is inside the blockquote.
+
+ # set offset past spaces and ">"
+ initial = offset = state.sCount[nextLine] + 1
+
+ try:
+ next_char: str | None = state.src[pos]
+ except IndexError:
+ next_char = None
+
+ # skip one optional space after '>'
+ if next_char == " ":
+ # ' > test '
+ # ^ -- position start of line here:
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ spaceAfterMarker = True
+ elif next_char == "\t":
+ spaceAfterMarker = True
+
+ if (state.bsCount[nextLine] + offset) % 4 == 3:
+ # ' >\t test '
+ # ^ -- position start of line here (tab has width==1)
+ pos += 1
+ initial += 1
+ offset += 1
+ adjustTab = False
+ else:
+ # ' >\t test '
+ # ^ -- position start of line here + shift bsCount slightly
+ # to make extra space appear
+ adjustTab = True
+
+ else:
+ spaceAfterMarker = False
+
+ oldBMarks.append(state.bMarks[nextLine])
+ state.bMarks[nextLine] = pos
+
+ while pos < max:
+ ch = state.src[pos]
+
+ if isStrSpace(ch):
+ if ch == "\t":
+ offset += (
+ 4
+ - (
+ offset
+ + state.bsCount[nextLine]
+ + (1 if adjustTab else 0)
+ )
+ % 4
+ )
+ else:
+ offset += 1
+ else:
+ break
+
+ pos += 1
+
+ lastLineEmpty = pos >= max
+
+ oldBSCount.append(state.bsCount[nextLine])
+ state.bsCount[nextLine] = (
+ state.sCount[nextLine] + 1 + (1 if spaceAfterMarker else 0)
+ )
+
+ oldSCount.append(state.sCount[nextLine])
+ state.sCount[nextLine] = offset - initial
+
+ oldTShift.append(state.tShift[nextLine])
+ state.tShift[nextLine] = pos - state.bMarks[nextLine]
+
+ nextLine += 1
+ continue
+
+ # Case 2: line is not inside the blockquote, and the last line was empty.
+ if lastLineEmpty:
+ break
+
+ # Case 3: another tag found.
+ terminate = False
+
+ for terminatorRule in terminatorRules:
+ if terminatorRule(state, nextLine, endLine, True):
+ terminate = True
+ break
+
+ if terminate:
+ # Quirk to enforce "hard termination mode" for paragraphs;
+ # normally if you call `tokenize(state, startLine, nextLine)`,
+ # paragraphs will look below nextLine for paragraph continuation,
+ # but if blockquote is terminated by another tag, they shouldn't
+ state.lineMax = nextLine
+
+ if state.blkIndent != 0:
+ # state.blkIndent was non-zero, we now set it to zero,
+ # so we need to re-calculate all offsets to appear as
+ # if indent wasn't changed
+ oldBMarks.append(state.bMarks[nextLine])
+ oldBSCount.append(state.bsCount[nextLine])
+ oldTShift.append(state.tShift[nextLine])
+ oldSCount.append(state.sCount[nextLine])
+ state.sCount[nextLine] -= state.blkIndent
+
+ break
+
+ oldBMarks.append(state.bMarks[nextLine])
+ oldBSCount.append(state.bsCount[nextLine])
+ oldTShift.append(state.tShift[nextLine])
+ oldSCount.append(state.sCount[nextLine])
+
+ # A negative indentation means that this is a paragraph continuation
+ #
+ state.sCount[nextLine] = -1
+
+ nextLine += 1
+
+ oldIndent = state.blkIndent
+ state.blkIndent = 0
+
+ token = state.push("blockquote_open", "blockquote", 1)
+ token.markup = ">"
+ token.map = lines = [startLine, 0]
+
+ state.md.block.tokenize(state, startLine, nextLine)
+
+ token = state.push("blockquote_close", "blockquote", -1)
+ token.markup = ">"
+
+ state.lineMax = oldLineMax
+ state.parentType = oldParentType
+ lines[1] = state.line
+
+ # Restore original tShift; this might not be necessary since the parser
+ # has already been here, but just to make sure we can do that.
+ for i, item in enumerate(oldTShift):
+ state.bMarks[i + startLine] = oldBMarks[i]
+ state.tShift[i + startLine] = item
+ state.sCount[i + startLine] = oldSCount[i]
+ state.bsCount[i + startLine] = oldBSCount[i]
+
+ state.blkIndent = oldIndent
+
+ return True
diff --git a/lib/markdown_it/rules_block/code.py b/lib/markdown_it/rules_block/code.py
new file mode 100644
index 0000000..af8a41c
--- /dev/null
+++ b/lib/markdown_it/rules_block/code.py
@@ -0,0 +1,36 @@
+"""Code block (4 spaces padded)."""
+
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
+def code(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
+ LOGGER.debug("entering code: %s, %s, %s, %s", state, startLine, endLine, silent)
+
+ if not state.is_code_block(startLine):
+ return False
+
+ last = nextLine = startLine + 1
+
+ while nextLine < endLine:
+ if state.isEmpty(nextLine):
+ nextLine += 1
+ continue
+
+ if state.is_code_block(nextLine):
+ nextLine += 1
+ last = nextLine
+ continue
+
+ break
+
+ state.line = last
+
+ token = state.push("code_block", "code", 0)
+ token.content = state.getLines(startLine, last, 4 + state.blkIndent, False) + "\n"
+ token.map = [startLine, state.line]
+
+ return True
diff --git a/lib/markdown_it/rules_block/fence.py b/lib/markdown_it/rules_block/fence.py
new file mode 100644
index 0000000..263f1b8
--- /dev/null
+++ b/lib/markdown_it/rules_block/fence.py
@@ -0,0 +1,101 @@
+# fences (``` lang, ~~~ lang)
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def fence(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a fenced code block (``` or ~~~, at least three markers).

    Captures an optional info string after the opening fence and scans
    forward for a closing fence of the same marker, at least as long as
    the opening one.  An unclosed fence is auto-closed at the end of the
    document (or of the parent block).
    """
    LOGGER.debug("entering fence: %s, %s, %s, %s", state, startLine, endLine, silent)

    haveEndMarker = False
    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # need room for at least three marker characters on this line
    if pos + 3 > maximum:
        return False

    marker = state.src[pos]

    if marker not in ("~", "`"):
        return False

    # scan marker length
    mem = pos
    pos = state.skipCharsStr(pos, marker)

    length = pos - mem

    if length < 3:
        return False

    markup = state.src[mem:pos]
    params = state.src[pos:maximum]

    # the info string of a backtick fence may not contain backticks
    if marker == "`" and marker in params:
        return False

    # Since start is found, we can report success here in validation mode
    if silent:
        return True

    # search end of block
    nextLine = startLine

    while True:
        nextLine += 1
        if nextLine >= endLine:
            # unclosed block should be autoclosed by end of document.
            # also block seems to be autoclosed by end of parent
            break

        pos = mem = state.bMarks[nextLine] + state.tShift[nextLine]
        maximum = state.eMarks[nextLine]

        if pos < maximum and state.sCount[nextLine] < state.blkIndent:
            # non-empty line with negative indent should stop the list:
            # - ```
            #  test
            break

        # fake trailing line has pos == len(src); treat as end of document
        try:
            if state.src[pos] != marker:
                continue
        except IndexError:
            break

        if state.is_code_block(nextLine):
            continue

        pos = state.skipCharsStr(pos, marker)

        # closing code fence must be at least as long as the opening one
        if pos - mem < length:
            continue

        # make sure tail has spaces only
        pos = state.skipSpaces(pos)

        if pos < maximum:
            continue

        haveEndMarker = True
        # found!
        break

    # If a fence has heading spaces, they should be removed from its inner block
    length = state.sCount[startLine]

    state.line = nextLine + (1 if haveEndMarker else 0)

    token = state.push("fence", "code", 0)
    token.info = params
    token.content = state.getLines(startLine + 1, nextLine, length, True)
    token.markup = markup
    token.map = [startLine, state.line]

    return True
diff --git a/lib/markdown_it/rules_block/heading.py b/lib/markdown_it/rules_block/heading.py
new file mode 100644
index 0000000..afcf9ed
--- /dev/null
+++ b/lib/markdown_it/rules_block/heading.py
@@ -0,0 +1,69 @@
+"""Atex heading (#, ##, ...)"""
+
+from __future__ import annotations
+
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def heading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse an ATX heading: ``#`` through ``######`` followed by the title."""
    LOGGER.debug("entering heading: %s, %s, %s, %s", state, startLine, endLine, silent)

    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    src = state.src
    ch: str | None = src[pos]

    if ch != "#" or pos >= maximum:
        return False

    # Count the run of '#' characters to determine the heading level.
    level = 1
    pos += 1
    ch = src[pos] if pos < len(src) else None
    while ch == "#" and pos < maximum and level <= 6:
        level += 1
        pos += 1
        ch = src[pos] if pos < len(src) else None

    # At most six markers, and the run must be followed by whitespace
    # (or end the line) to count as a heading.
    if level > 6 or (pos < maximum and not isStrSpace(ch)):
        return False

    if silent:
        return True

    # Trim an optional closing sequence such as ' ### ' from the end.
    maximum = state.skipSpacesBack(maximum, pos)
    tail = state.skipCharsStrBack(maximum, "#", pos)
    if tail > pos and isStrSpace(src[tail - 1]):
        maximum = tail

    state.line = startLine + 1

    markup = "#" * level
    token = state.push("heading_open", "h" + str(level), 1)
    token.markup = markup
    token.map = [startLine, state.line]

    token = state.push("inline", "", 0)
    token.content = src[pos:maximum].strip()
    token.map = [startLine, state.line]
    token.children = []

    token = state.push("heading_close", "h" + str(level), -1)
    token.markup = markup

    return True
diff --git a/lib/markdown_it/rules_block/hr.py b/lib/markdown_it/rules_block/hr.py
new file mode 100644
index 0000000..fca7d79
--- /dev/null
+++ b/lib/markdown_it/rules_block/hr.py
@@ -0,0 +1,56 @@
+"""Horizontal rule
+
+At least 3 of these characters on a line * - _
+"""
+
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def hr(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a thematic break: three or more '*', '-' or '_' on one line."""
    LOGGER.debug("entering hr: %s, %s, %s, %s", state, startLine, endLine, silent)

    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # fake trailing line has pos == len(src): nothing to parse there
    try:
        marker = state.src[pos]
    except IndexError:
        return False
    pos += 1

    if marker not in ("*", "-", "_"):
        return False

    # Markers may be interleaved with spaces, but at least three markers
    # are required; any other character aborts the rule.
    cnt = 1
    for ch in state.src[pos:maximum]:
        if ch == marker:
            cnt += 1
        elif not isStrSpace(ch):
            return False

    if cnt < 3:
        return False

    if silent:
        return True

    state.line = startLine + 1

    token = state.push("hr", "hr", 0)
    token.map = [startLine, state.line]
    token.markup = marker * (cnt + 1)

    return True
diff --git a/lib/markdown_it/rules_block/html_block.py b/lib/markdown_it/rules_block/html_block.py
new file mode 100644
index 0000000..3d43f6e
--- /dev/null
+++ b/lib/markdown_it/rules_block/html_block.py
@@ -0,0 +1,90 @@
+# HTML block
+from __future__ import annotations
+
+import logging
+import re
+
+from ..common.html_blocks import block_names
+from ..common.html_re import HTML_OPEN_CLOSE_TAG_STR
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
# An array of opening and corresponding closing sequences for html tags,
# last argument defines whether it can terminate a paragraph or not.
#
# NOTE(review): several entries in the previous version were garbled
# (two-element tuples, missing `<!--`/`<?`/`<!`/`<![CDATA[` openers and the
# `</?` in the block-tag pattern), which would raise IndexError in
# html_block.  Reconstructed below to the seven CommonMark HTML-block
# start/end conditions.
HTML_SEQUENCES: list[tuple[re.Pattern[str], re.Pattern[str], bool]] = [
    # 1. <script>/<pre>/<style>/<textarea>: ends at the matching close tag
    (
        re.compile(r"^<(script|pre|style|textarea)(?=(\s|>|$))", re.IGNORECASE),
        re.compile(r"<\/(script|pre|style|textarea)>", re.IGNORECASE),
        True,
    ),
    # 2. HTML comment: <!-- ... -->
    (re.compile(r"^<!--"), re.compile(r"-->"), True),
    # 3. Processing instruction: <? ... ?>
    (re.compile(r"^<\?"), re.compile(r"\?>"), True),
    # 4. Declaration: <!LETTER ... >
    (re.compile(r"^<![A-Z]"), re.compile(r">"), True),
    # 5. CDATA section: <![CDATA[ ... ]]>
    (re.compile(r"^<!\[CDATA\["), re.compile(r"\]\]>"), True),
    # 6. Known block-level tag: runs until a blank line
    (
        re.compile("^</?(" + "|".join(block_names) + ")(?=(\\s|/?>|$))", re.IGNORECASE),
        re.compile(r"^$"),
        True,
    ),
    # 7. Any other complete open/close tag alone on the line: runs until a
    #    blank line, and cannot interrupt a paragraph
    (re.compile(HTML_OPEN_CLOSE_TAG_STR + "\\s*$"), re.compile(r"^$"), False),
]
+
+
def html_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a raw HTML block.

    The first line is matched against the opening patterns in
    ``HTML_SEQUENCES``; the paired closing pattern (or a blank line)
    determines where the block ends.  Only active when the ``html``
    option is enabled.
    """
    LOGGER.debug(
        "entering html_block: %s, %s, %s, %s", state, startLine, endLine, silent
    )
    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # raw HTML is only recognised when the "html" option is on
    if not state.md.options.get("html", None):
        return False

    if state.src[pos] != "<":
        return False

    lineText = state.src[pos:maximum]

    # find the first opening pattern that matches this line
    html_seq = None
    for HTML_SEQUENCE in HTML_SEQUENCES:
        if HTML_SEQUENCE[0].search(lineText):
            html_seq = HTML_SEQUENCE
            break

    if not html_seq:
        return False

    if silent:
        # true if this sequence can be a terminator, false otherwise
        return html_seq[2]

    nextLine = startLine + 1

    # If we are here - we detected HTML block.
    # Let's roll down till block end.
    # (if the closing pattern already matched the first line, the block is
    # a single line and the loop is skipped)
    if not html_seq[1].search(lineText):
        while nextLine < endLine:
            if state.sCount[nextLine] < state.blkIndent:
                break

            pos = state.bMarks[nextLine] + state.tShift[nextLine]
            maximum = state.eMarks[nextLine]
            lineText = state.src[pos:maximum]

            if html_seq[1].search(lineText):
                # a non-empty closing line belongs to the block
                if len(lineText) != 0:
                    nextLine += 1
                break
            nextLine += 1

    state.line = nextLine

    token = state.push("html_block", "", 0)
    token.map = [startLine, nextLine]
    token.content = state.getLines(startLine, nextLine, state.blkIndent, True)

    return True
diff --git a/lib/markdown_it/rules_block/lheading.py b/lib/markdown_it/rules_block/lheading.py
new file mode 100644
index 0000000..3522207
--- /dev/null
+++ b/lib/markdown_it/rules_block/lheading.py
@@ -0,0 +1,86 @@
+# lheading (---, ==)
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def lheading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a setext heading: text underlined by ``===`` (h1) or ``---`` (h2)."""
    LOGGER.debug("entering lheading: %s, %s, %s, %s", state, startLine, endLine, silent)

    level = None
    nextLine = startLine + 1
    ruler = state.md.block.ruler
    terminatorRules = ruler.getRules("paragraph")

    if state.is_code_block(startLine):
        return False

    oldParentType = state.parentType
    state.parentType = "paragraph"  # use paragraph to match terminatorRules

    # jump line-by-line until empty one or EOF
    while nextLine < endLine and not state.isEmpty(nextLine):
        # this would be a code block normally, but after paragraph
        # it's considered a lazy continuation regardless of what's there
        if state.sCount[nextLine] - state.blkIndent > 3:
            nextLine += 1
            continue

        # Check for underline in setext header
        # (note: `marker` set here is read after the loop when the break
        # below fires, i.e. whenever `level` is set)
        if state.sCount[nextLine] >= state.blkIndent:
            pos = state.bMarks[nextLine] + state.tShift[nextLine]
            maximum = state.eMarks[nextLine]

            if pos < maximum:
                marker = state.src[pos]

                if marker in ("-", "="):
                    pos = state.skipCharsStr(pos, marker)
                    pos = state.skipSpaces(pos)

                    # /* = */
                    if pos >= maximum:
                        level = 1 if marker == "=" else 2
                        break

        # quirk for blockquotes, this line should already be checked by that rule
        if state.sCount[nextLine] < 0:
            nextLine += 1
            continue

        # Some tags can terminate paragraph without empty line.
        terminate = False
        for terminatorRule in terminatorRules:
            if terminatorRule(state, nextLine, endLine, True):
                terminate = True
                break
        if terminate:
            break

        nextLine += 1

    if not level:
        # Didn't find valid underline
        return False

    content = state.getLines(startLine, nextLine, state.blkIndent, False).strip()

    state.line = nextLine + 1

    token = state.push("heading_open", "h" + str(level), 1)
    token.markup = marker
    token.map = [startLine, state.line]

    token = state.push("inline", "", 0)
    token.content = content
    token.map = [startLine, state.line - 1]
    token.children = []

    token = state.push("heading_close", "h" + str(level), -1)
    token.markup = marker

    state.parentType = oldParentType

    return True
diff --git a/lib/markdown_it/rules_block/list.py b/lib/markdown_it/rules_block/list.py
new file mode 100644
index 0000000..d8070d7
--- /dev/null
+++ b/lib/markdown_it/rules_block/list.py
@@ -0,0 +1,345 @@
+# Lists
+import logging
+
+from ..common.utils import isStrSpace
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def skipBulletListMarker(state: StateBlock, startLine: int) -> int:
    """Search ``[-+*][\\n ]``; return the position just after the marker
    on success, or -1 on failure."""
    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]

    # fake trailing line has pos == len(src): no marker there
    try:
        marker = state.src[pos]
    except IndexError:
        return -1
    pos += 1

    if marker not in ("*", "-", "+"):
        return -1

    # The marker must be followed by whitespace (or end the line):
    # " -test " is not a list item.
    if pos < maximum and not isStrSpace(state.src[pos]):
        return -1

    return pos
+
+
def skipOrderedListMarker(state: StateBlock, startLine: int) -> int:
    """Search ``\\d+[.)][\\n ]``; return the position just after the marker
    on success, or -1 on failure."""
    start = state.bMarks[startLine] + state.tShift[startLine]
    pos = start
    maximum = state.eMarks[startLine]

    # A marker needs at least two characters: one digit plus the delimiter.
    if pos + 1 >= maximum:
        return -1

    ch = state.src[pos]
    pos += 1

    if not ("0" <= ch <= "9"):
        return -1

    while True:
        # EOL before a delimiter -> fail
        if pos >= maximum:
            return -1

        ch = state.src[pos]
        pos += 1

        if "0" <= ch <= "9":
            # List marker should have no more than 9 digits
            # (prevents integer overflow in browsers)
            if pos - start >= 10:
                return -1
            continue

        # found valid delimiter
        if ch in (")", "."):
            break

        return -1

    # The delimiter must be followed by whitespace (or end the line):
    # " 1.test " is not a list item.
    if pos < maximum and not isStrSpace(state.src[pos]):
        return -1

    return pos
+
+
def markTightParagraphs(state: StateBlock, idx: int) -> None:
    """Hide paragraph open/close token pairs directly inside a tight list.

    Scans tokens after ``idx`` (the list-open token) and flags paragraphs
    one nesting level below the list items as hidden, so the renderer
    omits their ``<p>`` wrappers.
    """
    target_level = state.level + 2
    tokens = state.tokens

    i = idx + 2
    last = len(tokens) - 2
    while i < last:
        tok = tokens[i]
        if tok.level == target_level and tok.type == "paragraph_open":
            # the matching close is always two tokens later
            # (open, inline, close); hide both ends of the pair
            tokens[i + 2].hidden = True
            tok.hidden = True
            i += 2
        i += 1
+
+
def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse ordered (``1.``/``1)``) and bullet (``-``/``+``/``*``) lists.

    Each list item is tokenized by a recursive subcall with a temporarily
    adjusted ``blkIndent``; the loop continues while subsequent lines carry
    a marker of the same style.
    """
    LOGGER.debug("entering list: %s, %s, %s, %s", state, startLine, endLine, silent)

    isTerminatingParagraph = False
    tight = True

    if state.is_code_block(startLine):
        return False

    # Special case:
    #  - item 1
    #   - item 2
    #    - item 3
    #     - item 4
    #      - this one is a paragraph continuation
    if (
        state.listIndent >= 0
        and state.sCount[startLine] - state.listIndent >= 4
        and state.sCount[startLine] < state.blkIndent
    ):
        return False

    # limit conditions when list can interrupt
    # a paragraph (validation mode only)
    # Next list item should still terminate previous list item
    #
    # This code can fail if plugins use blkIndent as well as lists,
    # but I hope the spec gets fixed long before that happens.
    #
    if (
        silent
        and state.parentType == "paragraph"
        and state.sCount[startLine] >= state.blkIndent
    ):
        isTerminatingParagraph = True

    # Detect list type and position after marker
    posAfterMarker = skipOrderedListMarker(state, startLine)
    if posAfterMarker >= 0:
        isOrdered = True
        start = state.bMarks[startLine] + state.tShift[startLine]
        markerValue = int(state.src[start : posAfterMarker - 1])

        # If we're starting a new ordered list right after
        # a paragraph, it should start with 1.
        if isTerminatingParagraph and markerValue != 1:
            return False
    else:
        posAfterMarker = skipBulletListMarker(state, startLine)
        if posAfterMarker >= 0:
            isOrdered = False
        else:
            return False

    # If we're starting a new unordered list right after
    # a paragraph, first line should not be empty.
    if (
        isTerminatingParagraph
        and state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]
    ):
        return False

    # We should terminate list on style change. Remember first one to compare.
    markerChar = state.src[posAfterMarker - 1]

    # For validation mode we can terminate immediately
    if silent:
        return True

    # Start list
    listTokIdx = len(state.tokens)

    if isOrdered:
        token = state.push("ordered_list_open", "ol", 1)
        if markerValue != 1:
            token.attrs = {"start": markerValue}

    else:
        token = state.push("bullet_list_open", "ul", 1)

    # listLines[1] is patched with the real end line after the loop
    token.map = listLines = [startLine, 0]
    token.markup = markerChar

    #
    # Iterate list items
    #

    nextLine = startLine
    prevEmptyEnd = False
    terminatorRules = state.md.block.ruler.getRules("list")

    oldParentType = state.parentType
    state.parentType = "list"

    while nextLine < endLine:
        pos = posAfterMarker
        maximum = state.eMarks[nextLine]

        # offset = column (tabs expanded) of the first content character
        initial = offset = (
            state.sCount[nextLine]
            + posAfterMarker
            - (state.bMarks[startLine] + state.tShift[startLine])
        )

        while pos < maximum:
            ch = state.src[pos]

            if ch == "\t":
                offset += 4 - (offset + state.bsCount[nextLine]) % 4
            elif ch == " ":
                offset += 1
            else:
                break

            pos += 1

        contentStart = pos

        # trimming space in "- \n 3" case, indent is 1 here
        indentAfterMarker = 1 if contentStart >= maximum else offset - initial

        # If we have more than 4 spaces, the indent is 1
        # (the rest is just indented code block)
        if indentAfterMarker > 4:
            indentAfterMarker = 1

        # " - test"
        # ^^^^^ - calculating total length of this thing
        indent = initial + indentAfterMarker

        # Run subparser & write tokens
        token = state.push("list_item_open", "li", 1)
        token.markup = markerChar
        token.map = itemLines = [startLine, 0]
        if isOrdered:
            token.info = state.src[start : posAfterMarker - 1]

        # change current state, then restore it after parser subcall
        oldTight = state.tight
        oldTShift = state.tShift[startLine]
        oldSCount = state.sCount[startLine]

        # - example list
        # ^ listIndent position will be here
        #   ^ blkIndent position will be here
        #
        oldListIndent = state.listIndent
        state.listIndent = state.blkIndent
        state.blkIndent = indent

        state.tight = True
        state.tShift[startLine] = contentStart - state.bMarks[startLine]
        state.sCount[startLine] = offset

        if contentStart >= maximum and state.isEmpty(startLine + 1):
            # workaround for this case
            # (list item is empty, list terminates before "foo"):
            # ~~~~~~~~
            #   -
            #
            #     foo
            # ~~~~~~~~
            state.line = min(state.line + 2, endLine)
        else:
            # NOTE in list.js this was:
            # state.md.block.tokenize(state, startLine, endLine, True)
            # but tokenize does not take the final parameter
            state.md.block.tokenize(state, startLine, endLine)

        # If any of list item is tight, mark list as tight
        if (not state.tight) or prevEmptyEnd:
            tight = False

        # An item becomes loose if it finishes with an empty line,
        # but we should filter the last element, because it means list finish
        prevEmptyEnd = (state.line - startLine) > 1 and state.isEmpty(state.line - 1)

        state.blkIndent = state.listIndent
        state.listIndent = oldListIndent
        state.tShift[startLine] = oldTShift
        state.sCount[startLine] = oldSCount
        state.tight = oldTight

        token = state.push("list_item_close", "li", -1)
        token.markup = markerChar

        nextLine = startLine = state.line
        itemLines[1] = nextLine

        if nextLine >= endLine:
            break

        contentStart = state.bMarks[startLine]

        #
        # Try to check if list is terminated or continued.
        #
        if state.sCount[nextLine] < state.blkIndent:
            break

        if state.is_code_block(startLine):
            break

        # fail if terminating block found
        terminate = False
        for terminatorRule in terminatorRules:
            if terminatorRule(state, nextLine, endLine, True):
                terminate = True
                break

        if terminate:
            break

        # fail if list has another type
        if isOrdered:
            posAfterMarker = skipOrderedListMarker(state, nextLine)
            if posAfterMarker < 0:
                break
            start = state.bMarks[nextLine] + state.tShift[nextLine]
        else:
            posAfterMarker = skipBulletListMarker(state, nextLine)
            if posAfterMarker < 0:
                break

        if markerChar != state.src[posAfterMarker - 1]:
            break

    # Finalize list
    if isOrdered:
        token = state.push("ordered_list_close", "ol", -1)
    else:
        token = state.push("bullet_list_close", "ul", -1)

    token.markup = markerChar

    listLines[1] = nextLine
    state.line = nextLine

    state.parentType = oldParentType

    # mark paragraphs tight if needed
    if tight:
        markTightParagraphs(state, listTokIdx)

    return True
diff --git a/lib/markdown_it/rules_block/paragraph.py b/lib/markdown_it/rules_block/paragraph.py
new file mode 100644
index 0000000..30ba877
--- /dev/null
+++ b/lib/markdown_it/rules_block/paragraph.py
@@ -0,0 +1,66 @@
+"""Paragraph."""
+
+import logging
+
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def paragraph(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a paragraph: consume lines until a blank line, EOF, or a
    terminator rule claims the next line."""
    LOGGER.debug(
        "entering paragraph: %s, %s, %s, %s", state, startLine, endLine, silent
    )

    nextLine = startLine + 1
    terminatorRules = state.md.block.ruler.getRules("paragraph")
    endLine = state.lineMax

    oldParentType = state.parentType
    state.parentType = "paragraph"

    # Walk forward line by line.
    while nextLine < endLine and not state.isEmpty(nextLine):
        # This would be a code block normally, but after a paragraph it is
        # considered a lazy continuation regardless of what's there.
        if state.sCount[nextLine] - state.blkIndent > 3:
            nextLine += 1
            continue

        # Negative indent: blockquote quirk, already validated by that rule.
        if state.sCount[nextLine] < 0:
            nextLine += 1
            continue

        # Some blocks can terminate a paragraph without a blank line.
        if any(rule(state, nextLine, endLine, True) for rule in terminatorRules):
            break

        nextLine += 1

    content = state.getLines(startLine, nextLine, state.blkIndent, False).strip()

    state.line = nextLine

    token = state.push("paragraph_open", "p", 1)
    token.map = [startLine, state.line]

    token = state.push("inline", "", 0)
    token.content = content
    token.map = [startLine, state.line]
    token.children = []

    token = state.push("paragraph_close", "p", -1)

    state.parentType = oldParentType

    return True
diff --git a/lib/markdown_it/rules_block/reference.py b/lib/markdown_it/rules_block/reference.py
new file mode 100644
index 0000000..ad94d40
--- /dev/null
+++ b/lib/markdown_it/rules_block/reference.py
@@ -0,0 +1,235 @@
+import logging
+
+from ..common.utils import charCodeAt, isSpace, normalizeReference
+from .state_block import StateBlock
+
+LOGGER = logging.getLogger(__name__)
+
+
def reference(state: StateBlock, startLine: int, _endLine: int, silent: bool) -> bool:
    """Parse a link reference definition: ``[label]: destination 'title'``.

    Definitions may span several lines; continuation lines are pulled in
    lazily via :func:`getNextLine`.  On success the definition is stored
    in ``state.env["references"]`` (duplicates are recorded under
    ``state.env["duplicate_refs"]``) and no tokens are emitted, unless
    the non-standard ``inline_definitions`` option is enabled.
    """
    LOGGER.debug(
        "entering reference: %s, %s, %s, %s", state, startLine, _endLine, silent
    )

    pos = state.bMarks[startLine] + state.tShift[startLine]
    maximum = state.eMarks[startLine]
    nextLine = startLine + 1

    if state.is_code_block(startLine):
        return False

    if state.src[pos] != "[":
        return False

    # max + 1 keeps the trailing newline, matching getNextLine's output
    string = state.src[pos : maximum + 1]

    # string = state.getLines(startLine, nextLine, state.blkIndent, False).strip()
    maximum = len(string)

    # scan the label: up to an unescaped "]"
    labelEnd = None
    pos = 1
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if ch == 0x5B:  # /* [ */
            return False
        elif ch == 0x5D:  # /* ] */
            labelEnd = pos
            break
        elif ch == 0x0A:  # /* \n */
            if (lineContent := getNextLine(state, nextLine)) is not None:
                string += lineContent
                maximum = len(string)
                nextLine += 1
        elif ch == 0x5C:  # /* \ */
            pos += 1
            if (
                pos < maximum
                and charCodeAt(string, pos) == 0x0A
                and (lineContent := getNextLine(state, nextLine)) is not None
            ):
                string += lineContent
                maximum = len(string)
                nextLine += 1
        pos += 1

    # the label must be immediately followed by a colon
    if (
        labelEnd is None or labelEnd < 0 or charCodeAt(string, labelEnd + 1) != 0x3A
    ):  # /* : */
        return False

    # [label]:   destination   'title'
    #         ^^^ skip optional whitespace here
    pos = labelEnd + 2
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if ch == 0x0A:
            if (lineContent := getNextLine(state, nextLine)) is not None:
                string += lineContent
                maximum = len(string)
                nextLine += 1
        elif isSpace(ch):
            pass
        else:
            break
        pos += 1

    # [label]:   destination   'title'
    #            ^^^^^^^^^^^ parse this
    destRes = state.md.helpers.parseLinkDestination(string, pos, maximum)
    if not destRes.ok:
        return False

    href = state.md.normalizeLink(destRes.str)
    if not state.md.validateLink(href):
        return False

    pos = destRes.pos

    # save cursor state, we could require to rollback later
    destEndPos = pos
    destEndLineNo = nextLine

    # [label]:   destination   'title'
    #                       ^^^ skipping those spaces
    start = pos
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if ch == 0x0A:
            if (lineContent := getNextLine(state, nextLine)) is not None:
                string += lineContent
                maximum = len(string)
                nextLine += 1
        elif isSpace(ch):
            pass
        else:
            break
        pos += 1

    # [label]:   destination   'title'
    #                           ^^^^^^^ parse this
    # the title may span lines, so keep feeding it input while it asks
    titleRes = state.md.helpers.parseLinkTitle(string, pos, maximum, None)
    while titleRes.can_continue:
        if (lineContent := getNextLine(state, nextLine)) is None:
            break
        string += lineContent
        pos = maximum
        maximum = len(string)
        nextLine += 1
        titleRes = state.md.helpers.parseLinkTitle(string, pos, maximum, titleRes)

    if pos < maximum and start != pos and titleRes.ok:
        title = titleRes.str
        pos = titleRes.pos
    else:
        # no valid title: roll the cursor back to just after the destination
        title = ""
        pos = destEndPos
        nextLine = destEndLineNo

    # skip trailing spaces until the rest of the line
    while pos < maximum:
        ch = charCodeAt(string, pos)
        if not isSpace(ch):
            break
        pos += 1

    if pos < maximum and charCodeAt(string, pos) != 0x0A and title:
        # garbage at the end of the line after title,
        # but it could still be a valid reference if we roll back
        title = ""
        pos = destEndPos
        nextLine = destEndLineNo
        while pos < maximum:
            ch = charCodeAt(string, pos)
            if not isSpace(ch):
                break
            pos += 1

    if pos < maximum and charCodeAt(string, pos) != 0x0A:
        # garbage at the end of the line
        return False

    label = normalizeReference(string[1:labelEnd])
    if not label:
        # CommonMark 0.20 disallows empty labels
        return False

    # Reference can not terminate anything. This check is for safety only.
    if silent:
        return True

    if "references" not in state.env:
        state.env["references"] = {}

    state.line = nextLine

    # note, this is not part of markdown-it JS, but is useful for renderers
    if state.md.options.get("inline_definitions", False):
        token = state.push("definition", "", 0)
        token.meta = {
            "id": label,
            "title": title,
            "url": href,
            "label": string[1:labelEnd],
        }
        token.map = [startLine, state.line]

    # first definition wins; later ones are recorded as duplicates
    if label not in state.env["references"]:
        state.env["references"][label] = {
            "title": title,
            "href": href,
            "map": [startLine, state.line],
        }
    else:
        state.env.setdefault("duplicate_refs", []).append(
            {
                "title": title,
                "href": href,
                "label": label,
                "map": [startLine, state.line],
            }
        )

    return True
+
+
def getNextLine(state: StateBlock, nextLine: int) -> None | str:
    """Return the raw text of ``nextLine`` (including its trailing newline)
    if a reference definition may continue onto it, else ``None``.

    A line continues the definition when it is non-empty and is either a
    lazy continuation (code-block indent, blockquote quirk) or is not
    claimed by any "reference" terminator rule.
    """
    endLine = state.lineMax

    if nextLine >= endLine or state.isEmpty(nextLine):
        # empty line or end of input
        return None

    isContinuation = False

    # this would be a code block normally, but after paragraph
    # it's considered a lazy continuation regardless of what's there
    if state.is_code_block(nextLine):
        isContinuation = True

    # quirk for blockquotes, this line should already be checked by that rule
    if state.sCount[nextLine] < 0:
        isContinuation = True

    if not isContinuation:
        terminatorRules = state.md.block.ruler.getRules("reference")
        oldParentType = state.parentType
        state.parentType = "reference"

        # Some tags can terminate paragraph without empty line.
        terminate = False
        for terminatorRule in terminatorRules:
            if terminatorRule(state, nextLine, endLine, True):
                terminate = True
                break

        state.parentType = oldParentType

        if terminate:
            # terminated by another block
            return None

    pos = state.bMarks[nextLine] + state.tShift[nextLine]
    maximum = state.eMarks[nextLine]

    # max + 1 explicitly includes the newline
    return state.src[pos : maximum + 1]
diff --git a/lib/markdown_it/rules_block/state_block.py b/lib/markdown_it/rules_block/state_block.py
new file mode 100644
index 0000000..445ad26
--- /dev/null
+++ b/lib/markdown_it/rules_block/state_block.py
@@ -0,0 +1,261 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Literal
+
+from ..common.utils import isStrSpace
+from ..ruler import StateBase
+from ..token import Token
+from ..utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it.main import MarkdownIt
+
+
class StateBlock(StateBase):
    """Mutable state of the block-level parser.

    Holds the source text, the output token stream, and per-line caches
    (begin/end offsets, indents) that the block rules consult and
    temporarily override while tokenizing nested structures.
    """

    def __init__(
        self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token]
    ) -> None:
        self.src = src

        # link to parser instance
        self.md = md

        self.env = env

        #
        # Internal state variables
        #

        self.tokens = tokens

        self.bMarks: list[int] = []  # line begin offsets for fast jumps
        self.eMarks: list[int] = []  # line end offsets for fast jumps
        # offsets of the first non-space characters (tabs not expanded)
        self.tShift: list[int] = []
        self.sCount: list[int] = []  # indents for each line (tabs expanded)

        # An amount of virtual spaces (tabs expanded) between beginning
        # of each line (bMarks) and real beginning of that line.
        #
        # It exists only as a hack because blockquotes override bMarks
        # losing information in the process.
        #
        # It's used only when expanding tabs, you can think about it as
        # an initial tab length, e.g. bsCount=21 applied to string `\t123`
        # means first tab should be expanded to 4-21%4 === 3 spaces.
        #
        self.bsCount: list[int] = []

        # block parser variables
        self.blkIndent = 0  # required block content indent (for example, if we are
        # inside a list, it would be positioned after list marker)
        self.line = 0  # line index in src
        self.lineMax = 0  # lines count
        self.tight = False  # loose/tight mode for lists
        self.ddIndent = -1  # indent of the current dd block (-1 if there isn't any)
        self.listIndent = -1  # indent of the current list block (-1 if there isn't any)

        # can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'
        # used in lists to determine if they interrupt a paragraph
        self.parentType = "root"

        self.level = 0

        # renderer
        self.result = ""

        # Create caches
        # Generate markers.
        indent_found = False

        start = pos = indent = offset = 0
        length = len(self.src)

        for pos, character in enumerate(self.src):
            if not indent_found:
                if isStrSpace(character):
                    indent += 1

                    if character == "\t":
                        offset += 4 - offset % 4
                    else:
                        offset += 1
                    continue
                else:
                    indent_found = True

            if character == "\n" or pos == length - 1:
                if character != "\n":
                    # last line without trailing newline: include its final char
                    pos += 1
                self.bMarks.append(start)
                self.eMarks.append(pos)
                self.tShift.append(indent)
                self.sCount.append(offset)
                self.bsCount.append(0)

                indent_found = False
                indent = 0
                offset = 0
                start = pos + 1

        # Push fake entry to simplify cache bounds checks
        self.bMarks.append(length)
        self.eMarks.append(length)
        self.tShift.append(0)
        self.sCount.append(0)
        self.bsCount.append(0)

        self.lineMax = len(self.bMarks) - 1  # don't count last fake line

        # pre-check if code blocks are enabled, to speed up is_code_block method
        self._code_enabled = "code" in self.md["block"].ruler.get_active_rules()

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}"
            f"(line={self.line},level={self.level},tokens={len(self.tokens)})"
        )

    def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token:
        """Push new token to "stream"."""
        token = Token(ttype, tag, nesting)
        token.block = True
        if nesting < 0:
            self.level -= 1  # closing tag
        token.level = self.level
        if nesting > 0:
            self.level += 1  # opening tag
        self.tokens.append(token)
        return token

    def isEmpty(self, line: int) -> bool:
        """Return True if the line contains no content after its indent."""
        return (self.bMarks[line] + self.tShift[line]) >= self.eMarks[line]

    def skipEmptyLines(self, from_pos: int) -> int:
        """Skip empty lines starting at ``from_pos``; return the first
        non-empty line index (or ``lineMax``)."""
        while from_pos < self.lineMax:
            try:
                if (self.bMarks[from_pos] + self.tShift[from_pos]) < self.eMarks[
                    from_pos
                ]:
                    break
            except IndexError:
                pass
            from_pos += 1
        return from_pos

    def skipSpaces(self, pos: int) -> int:
        """Skip spaces from given position."""
        while True:
            try:
                current = self.src[pos]
            except IndexError:
                break
            if not isStrSpace(current):
                break
            pos += 1
        return pos

    def skipSpacesBack(self, pos: int, minimum: int) -> int:
        """Skip spaces from given position in reverse."""
        if pos <= minimum:
            return pos
        while pos > minimum:
            pos -= 1
            if not isStrSpace(self.src[pos]):
                return pos + 1
        return pos

    def skipChars(self, pos: int, code: int) -> int:
        """Skip character code from given position.

        NOTE(review): relies on ``self.srcCharCode``, which is not set in
        ``__init__`` — presumably a legacy/deprecated attribute provided
        elsewhere; confirm before relying on this method.
        """
        while True:
            try:
                current = self.srcCharCode[pos]
            except IndexError:
                break
            if current != code:
                break
            pos += 1
        return pos

    def skipCharsStr(self, pos: int, ch: str) -> int:
        """Skip character string from given position."""
        while True:
            try:
                current = self.src[pos]
            except IndexError:
                break
            if current != ch:
                break
            pos += 1
        return pos

    def skipCharsBack(self, pos: int, code: int, minimum: int) -> int:
        """Skip character code reverse from given position - 1.

        NOTE(review): also relies on the legacy ``self.srcCharCode``
        attribute — see ``skipChars``.
        """
        if pos <= minimum:
            return pos
        while pos > minimum:
            pos -= 1
            if code != self.srcCharCode[pos]:
                return pos + 1
        return pos

    def skipCharsStrBack(self, pos: int, ch: str, minimum: int) -> int:
        """Skip character string reverse from given position - 1."""
        if pos <= minimum:
            return pos
        while pos > minimum:
            pos -= 1
            if ch != self.src[pos]:
                return pos + 1
        return pos

    def getLines(self, begin: int, end: int, indent: int, keepLastLF: bool) -> str:
        """Cut lines range from source, stripping up to ``indent`` columns
        of leading whitespace from each line (tabs partially expanded)."""
        line = begin
        if begin >= end:
            return ""

        queue = [""] * (end - begin)

        i = 1
        while line < end:
            lineIndent = 0
            lineStart = first = self.bMarks[line]
            last = (
                self.eMarks[line] + 1
                if line + 1 < end or keepLastLF
                else self.eMarks[line]
            )

            while (first < last) and (lineIndent < indent):
                ch = self.src[first]
                if isStrSpace(ch):
                    if ch == "\t":
                        lineIndent += 4 - (lineIndent + self.bsCount[line]) % 4
                    else:
                        lineIndent += 1
                elif first - lineStart < self.tShift[line]:
                    # bMarks was overridden (blockquote hack): count the
                    # hidden prefix character as one column of indent
                    lineIndent += 1
                else:
                    break
                first += 1

            if lineIndent > indent:
                # partially expanding tabs in code blocks, e.g '\t\tfoobar'
                # with indent=2 becomes ' \tfoobar'
                queue[i - 1] = (" " * (lineIndent - indent)) + self.src[first:last]
            else:
                queue[i - 1] = self.src[first:last]

            line += 1
            i += 1

        return "".join(queue)

    def is_code_block(self, line: int) -> bool:
        """Check if line is a code block,
        i.e. the code block rule is enabled and text is indented by more than 3 spaces.
        """
        return self._code_enabled and (self.sCount[line] - self.blkIndent) >= 4
diff --git a/lib/markdown_it/rules_block/table.py b/lib/markdown_it/rules_block/table.py
new file mode 100644
index 0000000..c52553d
--- /dev/null
+++ b/lib/markdown_it/rules_block/table.py
@@ -0,0 +1,250 @@
+# GFM table, https://github.github.com/gfm/#tables-extension-
+from __future__ import annotations
+
+import re
+
+from ..common.utils import charStrAt, isStrSpace
+from .state_block import StateBlock
+
+headerLineRe = re.compile(r"^:?-+:?$")
+enclosingPipesRe = re.compile(r"^\||\|$")
+
+# Limit the amount of empty autocompleted cells in a table,
+# see https://github.com/markdown-it/markdown-it/issues/1000,
+# Both pulldown-cmark and commonmark-hs limit the number of cells this way to ~200k.
+# We set it to 65k, which can expand user input by a factor of x370
+# (256x256 square is 1.8kB expanded into 650kB).
+MAX_AUTOCOMPLETED_CELLS = 0x10000
+
+
+ def getLine(state: StateBlock, line: int) -> str:
+     """Return the text of ``line`` with its block-level indent stripped."""
+     pos = state.bMarks[line] + state.tShift[line]
+     maximum = state.eMarks[line]
+
+     # return state.src.substr(pos, max - pos)
+     return state.src[pos:maximum]
+
+
+ def escapedSplit(string: str) -> list[str]:
+     """Split a table row on unescaped ``|`` characters.
+
+     A pipe preceded by a backslash (``\\|``) is treated as a literal pipe
+     inside a cell; the escaping backslash is dropped from the output.
+     """
+     result: list[str] = []
+     pos = 0
+     max = len(string)
+     isEscaped = False
+     lastPos = 0
+     current = ""
+     ch = charStrAt(string, pos)
+
+     while pos < max:
+         if ch == "|":
+             if not isEscaped:
+                 # pipe separating cells, '|'
+                 result.append(current + string[lastPos:pos])
+                 current = ""
+                 lastPos = pos + 1
+             else:
+                 # escaped pipe, '\|' — keep the pipe, drop the backslash
+                 current += string[lastPos : pos - 1]
+                 lastPos = pos
+
+         # Remember whether the *next* character is escaped.
+         isEscaped = ch == "\\"
+         pos += 1
+
+         ch = charStrAt(string, pos)
+
+     # Trailing cell after the last pipe (or the whole string if none).
+     result.append(current + string[lastPos:])
+
+     return result
+
+
+ def table(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
+     """Block rule for GFM tables.
+
+     Requires a header row containing ``|`` followed by a delimiter row of
+     ``-``/``:``/``|``.  Emits table/thead/tbody/tr/th/td token pairs and
+     returns True on success; ``silent`` performs validation only.
+     """
+     tbodyLines = None
+
+     # should have at least two lines
+     if startLine + 2 > endLine:
+         return False
+
+     nextLine = startLine + 1
+
+     if state.sCount[nextLine] < state.blkIndent:
+         return False
+
+     if state.is_code_block(nextLine):
+         return False
+
+     # first character of the second line should be '|', '-', ':',
+     # and no other characters are allowed but spaces;
+     # basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp
+
+     pos = state.bMarks[nextLine] + state.tShift[nextLine]
+     if pos >= state.eMarks[nextLine]:
+         return False
+     first_ch = state.src[pos]
+     pos += 1
+     if first_ch not in ("|", "-", ":"):
+         return False
+
+     if pos >= state.eMarks[nextLine]:
+         return False
+     second_ch = state.src[pos]
+     pos += 1
+     if second_ch not in ("|", "-", ":") and not isStrSpace(second_ch):
+         return False
+
+     # if first character is '-', then second character must not be a space
+     # (due to parsing ambiguity with list)
+     if first_ch == "-" and isStrSpace(second_ch):
+         return False
+
+     while pos < state.eMarks[nextLine]:
+         ch = state.src[pos]
+
+         if ch not in ("|", "-", ":") and not isStrSpace(ch):
+             return False
+
+         pos += 1
+
+     lineText = getLine(state, startLine + 1)
+
+     # Parse the delimiter row into per-column alignments.
+     columns = lineText.split("|")
+     aligns = []
+     for i in range(len(columns)):
+         t = columns[i].strip()
+         if not t:
+             # allow empty columns before and after table, but not in between columns;
+             # e.g. allow ` |---| `, disallow ` ---||--- `
+             if i == 0 or i == len(columns) - 1:
+                 continue
+             else:
+                 return False
+
+         if not headerLineRe.search(t):
+             return False
+         # ':---:' -> center, '---:' -> right, ':---' -> left, '---' -> default
+         if charStrAt(t, len(t) - 1) == ":":
+             aligns.append("center" if charStrAt(t, 0) == ":" else "right")
+         elif charStrAt(t, 0) == ":":
+             aligns.append("left")
+         else:
+             aligns.append("")
+
+     lineText = getLine(state, startLine).strip()
+     if "|" not in lineText:
+         return False
+     if state.is_code_block(startLine):
+         return False
+     columns = escapedSplit(lineText)
+     # Drop empty leading/trailing cells produced by enclosing pipes.
+     if columns and columns[0] == "":
+         columns.pop(0)
+     if columns and columns[-1] == "":
+         columns.pop()
+
+     # header row will define an amount of columns in the entire table,
+     # and align row should be exactly the same (the rest of the rows can differ)
+     columnCount = len(columns)
+     if columnCount == 0 or columnCount != len(aligns):
+         return False
+
+     if silent:
+         return True
+
+     oldParentType = state.parentType
+     state.parentType = "table"
+
+     # use 'blockquote' lists for termination because it's
+     # the most similar to tables
+     terminatorRules = state.md.block.ruler.getRules("blockquote")
+
+     token = state.push("table_open", "table", 1)
+     token.map = tableLines = [startLine, 0]
+
+     token = state.push("thead_open", "thead", 1)
+     token.map = [startLine, startLine + 1]
+
+     token = state.push("tr_open", "tr", 1)
+     token.map = [startLine, startLine + 1]
+
+     for i in range(len(columns)):
+         token = state.push("th_open", "th", 1)
+         if aligns[i]:
+             token.attrs = {"style": "text-align:" + aligns[i]}
+
+         token = state.push("inline", "", 0)
+         # note in markdown-it this map was removed in v12.0.0 however, we keep it,
+         # since it is helpful to propagate to children tokens
+         token.map = [startLine, startLine + 1]
+         token.content = columns[i].strip()
+         token.children = []
+
+         token = state.push("th_close", "th", -1)
+
+     token = state.push("tr_close", "tr", -1)
+     token = state.push("thead_close", "thead", -1)
+
+     # Body rows: continue until indent drops, a terminator rule fires,
+     # an empty line, a code block, or the autocomplete budget is exceeded.
+     autocompleted_cells = 0
+     nextLine = startLine + 2
+     while nextLine < endLine:
+         if state.sCount[nextLine] < state.blkIndent:
+             break
+
+         terminate = False
+         for i in range(len(terminatorRules)):
+             if terminatorRules[i](state, nextLine, endLine, True):
+                 terminate = True
+                 break
+
+         if terminate:
+             break
+         lineText = getLine(state, nextLine).strip()
+         if not lineText:
+             break
+         if state.is_code_block(nextLine):
+             break
+         columns = escapedSplit(lineText)
+         if columns and columns[0] == "":
+             columns.pop(0)
+         if columns and columns[-1] == "":
+             columns.pop()
+
+         # note: autocomplete count can be negative if user specifies more columns than header,
+         # but that does not affect intended use (which is limiting expansion)
+         autocompleted_cells += columnCount - len(columns)
+         if autocompleted_cells > MAX_AUTOCOMPLETED_CELLS:
+             break
+
+         if nextLine == startLine + 2:
+             token = state.push("tbody_open", "tbody", 1)
+             token.map = tbodyLines = [startLine + 2, 0]
+
+         token = state.push("tr_open", "tr", 1)
+         token.map = [nextLine, nextLine + 1]
+
+         for i in range(columnCount):
+             token = state.push("td_open", "td", 1)
+             if aligns[i]:
+                 token.attrs = {"style": "text-align:" + aligns[i]}
+
+             token = state.push("inline", "", 0)
+             # note in markdown-it this map was removed in v12.0.0 however, we keep it,
+             # since it is helpful to propagate to children tokens
+             token.map = [nextLine, nextLine + 1]
+             # Rows shorter than the header are autocompleted with empty cells.
+             try:
+                 token.content = columns[i].strip() if columns[i] else ""
+             except IndexError:
+                 token.content = ""
+             token.children = []
+
+             token = state.push("td_close", "td", -1)
+
+         token = state.push("tr_close", "tr", -1)
+
+         nextLine += 1
+
+     if tbodyLines:
+         token = state.push("tbody_close", "tbody", -1)
+         tbodyLines[1] = nextLine
+
+     token = state.push("table_close", "table", -1)
+
+     tableLines[1] = nextLine
+     state.parentType = oldParentType
+     state.line = nextLine
+     return True
diff --git a/lib/markdown_it/rules_core/__init__.py b/lib/markdown_it/rules_core/__init__.py
new file mode 100644
index 0000000..e7d7753
--- /dev/null
+++ b/lib/markdown_it/rules_core/__init__.py
@@ -0,0 +1,19 @@
+__all__ = (
+ "StateCore",
+ "block",
+ "inline",
+ "linkify",
+ "normalize",
+ "replace",
+ "smartquotes",
+ "text_join",
+)
+
+from .block import block
+from .inline import inline
+from .linkify import linkify
+from .normalize import normalize
+from .replacements import replace
+from .smartquotes import smartquotes
+from .state_core import StateCore
+from .text_join import text_join
diff --git a/lib/markdown_it/rules_core/__pycache__/__init__.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..c197c7e
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/block.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/block.cpython-314.pyc
new file mode 100644
index 0000000..af5505c
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/block.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/inline.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/inline.cpython-314.pyc
new file mode 100644
index 0000000..9d3b6bd
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/inline.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/linkify.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/linkify.cpython-314.pyc
new file mode 100644
index 0000000..e48cabf
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/linkify.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/normalize.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/normalize.cpython-314.pyc
new file mode 100644
index 0000000..3aa299d
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/normalize.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/replacements.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/replacements.cpython-314.pyc
new file mode 100644
index 0000000..1bdeba6
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/replacements.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/smartquotes.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/smartquotes.cpython-314.pyc
new file mode 100644
index 0000000..c5b9a31
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/smartquotes.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/state_core.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/state_core.cpython-314.pyc
new file mode 100644
index 0000000..bbe2789
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/state_core.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/__pycache__/text_join.cpython-314.pyc b/lib/markdown_it/rules_core/__pycache__/text_join.cpython-314.pyc
new file mode 100644
index 0000000..9190b35
Binary files /dev/null and b/lib/markdown_it/rules_core/__pycache__/text_join.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_core/block.py b/lib/markdown_it/rules_core/block.py
new file mode 100644
index 0000000..a6c3bb8
--- /dev/null
+++ b/lib/markdown_it/rules_core/block.py
@@ -0,0 +1,13 @@
+from ..token import Token
+from .state_core import StateCore
+
+
+ def block(state: StateCore) -> None:
+     """Core rule: run the block parser over ``state.src``.
+
+     In inline mode the whole source becomes a single ``inline`` token
+     instead of being block-parsed.
+     """
+     if state.inlineMode:
+         token = Token("inline", "", 0)
+         token.content = state.src
+         token.map = [0, 1]
+         token.children = []
+         state.tokens.append(token)
+     else:
+         state.md.block.parse(state.src, state.md, state.env, state.tokens)
diff --git a/lib/markdown_it/rules_core/inline.py b/lib/markdown_it/rules_core/inline.py
new file mode 100644
index 0000000..c3fd0b5
--- /dev/null
+++ b/lib/markdown_it/rules_core/inline.py
@@ -0,0 +1,10 @@
+from .state_core import StateCore
+
+
+ def inline(state: StateCore) -> None:
+     """Parse inlines"""
+     # Expand the content of every block-level ``inline`` token into
+     # child inline tokens, in place.
+     for token in state.tokens:
+         if token.type == "inline":
+             if token.children is None:
+                 token.children = []
+             state.md.inline.parse(token.content, state.md, state.env, token.children)
diff --git a/lib/markdown_it/rules_core/linkify.py b/lib/markdown_it/rules_core/linkify.py
new file mode 100644
index 0000000..efbc9d4
--- /dev/null
+++ b/lib/markdown_it/rules_core/linkify.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+import re
+from typing import Protocol
+
+from ..common.utils import arrayReplaceAt, isLinkClose, isLinkOpen
+from ..token import Token
+from .state_core import StateCore
+
+HTTP_RE = re.compile(r"^http://")
+MAILTO_RE = re.compile(r"^mailto:")
+TEST_MAILTO_RE = re.compile(r"^mailto:", flags=re.IGNORECASE)
+
+
+ def linkify(state: StateCore) -> None:
+     """Rule for identifying plain-text links.
+
+     Scans ``text`` child tokens of each inline token and replaces matched
+     URLs/emails with link_open/text/link_close token triples.  Requires
+     the ``linkify`` option and an installed linkify implementation.
+     """
+     if not state.md.options.linkify:
+         return
+
+     if not state.md.linkify:
+         raise ModuleNotFoundError("Linkify enabled but not installed.")
+
+     for inline_token in state.tokens:
+         # pretest is a cheap pre-filter before the full per-token scan.
+         if inline_token.type != "inline" or not state.md.linkify.pretest(
+             inline_token.content
+         ):
+             continue
+
+         tokens = inline_token.children
+
+         htmlLinkLevel = 0
+
+         # We scan from the end, to keep position when new tags added.
+         # Use reversed logic in links start/end match
+         assert tokens is not None
+         i = len(tokens)
+         while i >= 1:
+             i -= 1
+             assert isinstance(tokens, list)
+             currentToken = tokens[i]
+
+             # Skip content of markdown links
+             if currentToken.type == "link_close":
+                 i -= 1
+                 while (
+                     tokens[i].level != currentToken.level
+                     and tokens[i].type != "link_open"
+                 ):
+                     i -= 1
+                 continue
+
+             # Skip content of html tag links
+             # (counters are inverted because we iterate in reverse)
+             if currentToken.type == "html_inline":
+                 if isLinkOpen(currentToken.content) and htmlLinkLevel > 0:
+                     htmlLinkLevel -= 1
+                 if isLinkClose(currentToken.content):
+                     htmlLinkLevel += 1
+             if htmlLinkLevel > 0:
+                 continue
+
+             if currentToken.type == "text" and state.md.linkify.test(
+                 currentToken.content
+             ):
+                 text = currentToken.content
+                 links: list[_LinkType] = state.md.linkify.match(text) or []
+
+                 # Now split string to nodes
+                 nodes = []
+                 level = currentToken.level
+                 lastPos = 0
+
+                 # forbid escape sequence at the start of the string,
+                 # this avoids http\://example.com/ from being linkified as
+                 # http://example.com/
+                 if (
+                     links
+                     and links[0].index == 0
+                     and i > 0
+                     and tokens[i - 1].type == "text_special"
+                 ):
+                     links = links[1:]
+
+                 for link in links:
+                     url = link.url
+                     fullUrl = state.md.normalizeLink(url)
+                     if not state.md.validateLink(fullUrl):
+                         continue
+
+                     urlText = link.text
+
+                     # Linkifier might send raw hostnames like "example.com", where url
+                     # starts with domain name. So we prepend http:// in those cases,
+                     # and remove it afterwards.
+                     if not link.schema:
+                         urlText = HTTP_RE.sub(
+                             "", state.md.normalizeLinkText("http://" + urlText)
+                         )
+                     elif link.schema == "mailto:" and TEST_MAILTO_RE.search(urlText):
+                         urlText = MAILTO_RE.sub(
+                             "", state.md.normalizeLinkText("mailto:" + urlText)
+                         )
+                     else:
+                         urlText = state.md.normalizeLinkText(urlText)
+
+                     pos = link.index
+
+                     # Plain text between the previous link and this one.
+                     if pos > lastPos:
+                         token = Token("text", "", 0)
+                         token.content = text[lastPos:pos]
+                         token.level = level
+                         nodes.append(token)
+
+                     token = Token("link_open", "a", 1)
+                     token.attrs = {"href": fullUrl}
+                     token.level = level
+                     level += 1
+                     token.markup = "linkify"
+                     token.info = "auto"
+                     nodes.append(token)
+
+                     token = Token("text", "", 0)
+                     token.content = urlText
+                     token.level = level
+                     nodes.append(token)
+
+                     token = Token("link_close", "a", -1)
+                     level -= 1
+                     token.level = level
+                     token.markup = "linkify"
+                     token.info = "auto"
+                     nodes.append(token)
+
+                     lastPos = link.last_index
+
+                 # Trailing text after the final link.
+                 if lastPos < len(text):
+                     token = Token("text", "", 0)
+                     token.content = text[lastPos:]
+                     token.level = level
+                     nodes.append(token)
+
+                 inline_token.children = tokens = arrayReplaceAt(tokens, i, nodes)
+
+
+ class _LinkType(Protocol):
+     """Structural type of a single match returned by ``linkify.match``."""
+
+     url: str
+     text: str
+     index: int
+     last_index: int
+     schema: str | None
diff --git a/lib/markdown_it/rules_core/normalize.py b/lib/markdown_it/rules_core/normalize.py
new file mode 100644
index 0000000..3243924
--- /dev/null
+++ b/lib/markdown_it/rules_core/normalize.py
@@ -0,0 +1,19 @@
+"""Normalize input string."""
+
+import re
+
+from .state_core import StateCore
+
+# https://spec.commonmark.org/0.29/#line-ending
+NEWLINES_RE = re.compile(r"\r\n?|\n")
+NULL_RE = re.compile(r"\0")
+
+
+ def normalize(state: StateCore) -> None:
+     """Core rule: normalize line endings to ``\\n`` and replace NUL bytes.
+
+     See https://spec.commonmark.org/0.29/#line-ending and
+     https://spec.commonmark.org/0.29/#insecure-characters
+     """
+     # Normalize newlines
+     string = NEWLINES_RE.sub("\n", state.src)
+
+     # Replace NULL characters with the Unicode replacement character
+     string = NULL_RE.sub("\ufffd", string)
+
+     state.src = string
diff --git a/lib/markdown_it/rules_core/replacements.py b/lib/markdown_it/rules_core/replacements.py
new file mode 100644
index 0000000..bcc9980
--- /dev/null
+++ b/lib/markdown_it/rules_core/replacements.py
@@ -0,0 +1,127 @@
+"""Simple typographic replacements
+
+* ``(c)``, ``(C)`` → ©
+* ``(tm)``, ``(TM)`` → ™
+* ``(r)``, ``(R)`` → ®
+* ``+-`` → ±
+* ``...`` → …
+* ``?....`` → ?..
+* ``!....`` → !..
+* ``????????`` → ???
+* ``!!!!!`` → !!!
+* ``,,,`` → ,
+* ``--`` → &ndash
+* ``---`` → &mdash
+"""
+
+from __future__ import annotations
+
+import logging
+import re
+
+from ..token import Token
+from .state_core import StateCore
+
+LOGGER = logging.getLogger(__name__)
+
+# TODO:
+# - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
+# - multiplication 2 x 4 -> 2 × 4
+
+RARE_RE = re.compile(r"\+-|\.\.|\?\?\?\?|!!!!|,,|--")
+
+# Workaround for phantomjs - need regex without /g flag,
+# or root check will fail every second time
+# SCOPED_ABBR_TEST_RE = r"\((c|tm|r)\)"
+
+SCOPED_ABBR_RE = re.compile(r"\((c|tm|r)\)", flags=re.IGNORECASE)
+
+PLUS_MINUS_RE = re.compile(r"\+-")
+
+ELLIPSIS_RE = re.compile(r"\.{2,}")
+
+ELLIPSIS_QUESTION_EXCLAMATION_RE = re.compile(r"([?!])…")
+
+QUESTION_EXCLAMATION_RE = re.compile(r"([?!]){4,}")
+
+COMMA_RE = re.compile(r",{2,}")
+
+EM_DASH_RE = re.compile(r"(^|[^-])---(?=[^-]|$)", flags=re.MULTILINE)
+
+EN_DASH_RE = re.compile(r"(^|\s)--(?=\s|$)", flags=re.MULTILINE)
+
+EN_DASH_INDENT_RE = re.compile(r"(^|[^-\s])--(?=[^-\s]|$)", flags=re.MULTILINE)
+
+
+SCOPED_ABBR = {"c": "©", "r": "®", "tm": "™"}
+
+
+ def replaceFn(match: re.Match[str]) -> str:
+     """Map a ``(c)``/``(tm)``/``(r)`` match (any case) to its symbol."""
+     return SCOPED_ABBR[match.group(1).lower()]
+
+
+ def replace_scoped(inlineTokens: list[Token]) -> None:
+     """Apply (c)/(tm)/(r) replacements to text tokens, skipping autolinks."""
+     inside_autolink = 0
+
+     for token in inlineTokens:
+         if token.type == "text" and not inside_autolink:
+             token.content = SCOPED_ABBR_RE.sub(replaceFn, token.content)
+
+         # NOTE: open decrements / close increments — the counter logic is
+         # inverted relative to a forward scan; net effect is the same:
+         # text between autolink open/close is skipped.
+         if token.type == "link_open" and token.info == "auto":
+             inside_autolink -= 1
+
+         if token.type == "link_close" and token.info == "auto":
+             inside_autolink += 1
+
+
+ def replace_rare(inlineTokens: list[Token]) -> None:
+     """Apply the punctuation replacements (±, …, dashes, …) to text tokens,
+     skipping the text inside autolinks.
+     """
+     inside_autolink = 0
+
+     for token in inlineTokens:
+         if (
+             token.type == "text"
+             and (not inside_autolink)
+             and RARE_RE.search(token.content)
+         ):
+             # +- -> ±
+             token.content = PLUS_MINUS_RE.sub("±", token.content)
+
+             # .., ..., ....... -> …
+             token.content = ELLIPSIS_RE.sub("…", token.content)
+
+             # but ?..... & !..... -> ?.. & !..
+             token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub("\\1..", token.content)
+             token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content)
+
+             # ,,  ,,,  ,,,, -> ,
+             token.content = COMMA_RE.sub(",", token.content)
+
+             # em-dash
+             token.content = EM_DASH_RE.sub("\\1\u2014", token.content)
+
+             # en-dash
+             token.content = EN_DASH_RE.sub("\\1\u2013", token.content)
+             token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content)
+
+         # Autolink tracking: see replace_scoped for the inverted counters.
+         if token.type == "link_open" and token.info == "auto":
+             inside_autolink -= 1
+
+         if token.type == "link_close" and token.info == "auto":
+             inside_autolink += 1
+
+
+ def replace(state: StateCore) -> None:
+     """Core rule: typographic replacements (enabled by the ``typographer``
+     option).  Cheap regex pretests on the parent token's content gate the
+     per-child-token passes.
+     """
+     if not state.md.options.typographer:
+         return
+
+     for token in state.tokens:
+         if token.type != "inline":
+             continue
+         if token.children is None:
+             continue
+
+         if SCOPED_ABBR_RE.search(token.content):
+             replace_scoped(token.children)
+
+         if RARE_RE.search(token.content):
+             replace_rare(token.children)
diff --git a/lib/markdown_it/rules_core/smartquotes.py b/lib/markdown_it/rules_core/smartquotes.py
new file mode 100644
index 0000000..f9b8b45
--- /dev/null
+++ b/lib/markdown_it/rules_core/smartquotes.py
@@ -0,0 +1,202 @@
+"""Convert straight quotation marks to typographic ones"""
+
+from __future__ import annotations
+
+import re
+from typing import Any
+
+from ..common.utils import charCodeAt, isMdAsciiPunct, isPunctChar, isWhiteSpace
+from ..token import Token
+from .state_core import StateCore
+
+QUOTE_TEST_RE = re.compile(r"['\"]")
+QUOTE_RE = re.compile(r"['\"]")
+APOSTROPHE = "\u2019" # ’
+
+
+ def replaceAt(string: str, index: int, ch: str) -> str:
+     """Return ``string`` with the single character at ``index`` replaced
+     by ``ch`` (``ch`` may be longer than one character).
+     """
+     # When the index is negative, the behavior is different from the js version.
+     # But basically, the index will not be negative.
+     assert index >= 0
+     return string[:index] + ch + string[index + 1 :]
+
+
+ def process_inlines(tokens: list[Token], state: StateCore) -> None:
+     """Replace straight quotes with typographic ones in a token stream.
+
+     Maintains a stack of potential opening quotes (token index, position,
+     single/double, nesting level) and rewrites both ends when a matching
+     closer is found.  Mutates token contents in place.
+     """
+     stack: list[dict[str, Any]] = []
+
+     for i, token in enumerate(tokens):
+         thisLevel = token.level
+
+         # Drop stack entries opened at a deeper nesting level.
+         j = 0
+         for j in range(len(stack))[::-1]:
+             if stack[j]["level"] <= thisLevel:
+                 break
+         else:
+             # When the loop is terminated without a "break".
+             # Subtract 1 to get the same index as the js version.
+             j -= 1
+
+         stack = stack[: j + 1]
+
+         if token.type != "text":
+             continue
+
+         text = token.content
+         pos = 0
+         maximum = len(text)
+
+         while pos < maximum:
+             goto_outer = False
+             lastIndex = pos
+             t = QUOTE_RE.search(text[lastIndex:])
+             if not t:
+                 break
+
+             canOpen = canClose = True
+             pos = t.start(0) + lastIndex + 1
+             isSingle = t.group(0) == "'"
+
+             # Find previous character,
+             # default to space if it's the beginning of the line
+             lastChar: None | int = 0x20
+
+             if t.start(0) + lastIndex - 1 >= 0:
+                 lastChar = charCodeAt(text, t.start(0) + lastIndex - 1)
+             else:
+                 # Quote is at the start of this token: look back through
+                 # earlier tokens for the preceding character.
+                 for j in range(i)[::-1]:
+                     if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
+                         break
+                     # should skip all tokens except 'text', 'html_inline' or 'code_inline'
+                     if not tokens[j].content:
+                         continue
+
+                     lastChar = charCodeAt(tokens[j].content, len(tokens[j].content) - 1)
+                     break
+
+             # Find next character,
+             # default to space if it's the end of the line
+             nextChar: None | int = 0x20
+
+             if pos < maximum:
+                 nextChar = charCodeAt(text, pos)
+             else:
+                 for j in range(i + 1, len(tokens)):
+                     # nextChar defaults to 0x20
+                     if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
+                         break
+                     # should skip all tokens except 'text', 'html_inline' or 'code_inline'
+                     if not tokens[j].content:
+                         continue
+
+                     nextChar = charCodeAt(tokens[j].content, 0)
+                     break
+
+             isLastPunctChar = lastChar is not None and (
+                 isMdAsciiPunct(lastChar) or isPunctChar(chr(lastChar))
+             )
+             isNextPunctChar = nextChar is not None and (
+                 isMdAsciiPunct(nextChar) or isPunctChar(chr(nextChar))
+             )
+
+             isLastWhiteSpace = lastChar is not None and isWhiteSpace(lastChar)
+             isNextWhiteSpace = nextChar is not None and isWhiteSpace(nextChar)
+
+             if isNextWhiteSpace:  # noqa: SIM114
+                 canOpen = False
+             elif isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar):
+                 canOpen = False
+
+             if isLastWhiteSpace:  # noqa: SIM114
+                 canClose = False
+             elif isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar):
+                 canClose = False
+
+             if nextChar == 0x22 and t.group(0) == '"':  # 0x22: "  # noqa: SIM102
+                 if (
+                     lastChar is not None and lastChar >= 0x30 and lastChar <= 0x39
+                 ):  # 0x30: 0, 0x39: 9
+                     # special case: 1"" - count first quote as an inch
+                     canClose = canOpen = False
+
+             if canOpen and canClose:
+                 # Replace quotes in the middle of punctuation sequence, but not
+                 # in the middle of the words, i.e.:
+                 #
+                 # 1. foo " bar " baz - not replaced
+                 # 2. foo-"-bar-"-baz - replaced
+                 # 3. foo"bar"baz     - not replaced
+                 canOpen = isLastPunctChar
+                 canClose = isNextPunctChar
+
+             if not canOpen and not canClose:
+                 # middle of word
+                 if isSingle:
+                     # Treat a lone single quote inside a word as an apostrophe.
+                     token.content = replaceAt(
+                         token.content, t.start(0) + lastIndex, APOSTROPHE
+                     )
+                 continue
+
+             if canClose:
+                 # this could be a closing quote, rewind the stack to get a match
+                 for j in range(len(stack))[::-1]:
+                     item = stack[j]
+                     if stack[j]["level"] < thisLevel:
+                         break
+                     if item["single"] == isSingle and stack[j]["level"] == thisLevel:
+                         item = stack[j]
+
+                         if isSingle:
+                             openQuote = state.md.options.quotes[2]
+                             closeQuote = state.md.options.quotes[3]
+                         else:
+                             openQuote = state.md.options.quotes[0]
+                             closeQuote = state.md.options.quotes[1]
+
+                         # replace token.content *before* tokens[item.token].content,
+                         # because, if they are pointing at the same token, replaceAt
+                         # could mess up indices when quote length != 1
+                         token.content = replaceAt(
+                             token.content, t.start(0) + lastIndex, closeQuote
+                         )
+                         tokens[item["token"]].content = replaceAt(
+                             tokens[item["token"]].content, item["pos"], openQuote
+                         )
+
+                         # Account for multi-character quote strings when
+                         # continuing the scan in the (possibly same) token.
+                         pos += len(closeQuote) - 1
+                         if item["token"] == i:
+                             pos += len(openQuote) - 1
+
+                         text = token.content
+                         maximum = len(text)
+
+                         stack = stack[:j]
+                         goto_outer = True
+                         break
+                 if goto_outer:
+                     goto_outer = False
+                     continue
+
+             if canOpen:
+                 stack.append(
+                     {
+                         "token": i,
+                         "pos": t.start(0) + lastIndex,
+                         "single": isSingle,
+                         "level": thisLevel,
+                     }
+                 )
+             elif canClose and isSingle:
+                 token.content = replaceAt(
+                     token.content, t.start(0) + lastIndex, APOSTROPHE
+                 )
+
+
+ def smartquotes(state: StateCore) -> None:
+     """Core rule: smart quotes (enabled by the ``typographer`` option).
+
+     Delegates to ``process_inlines`` for inline tokens whose content
+     contains at least one straight quote.
+     """
+     if not state.md.options.typographer:
+         return
+
+     for token in state.tokens:
+         if token.type != "inline" or not QUOTE_RE.search(token.content):
+             continue
+         if token.children is not None:
+             process_inlines(token.children, state)
diff --git a/lib/markdown_it/rules_core/state_core.py b/lib/markdown_it/rules_core/state_core.py
new file mode 100644
index 0000000..a938041
--- /dev/null
+++ b/lib/markdown_it/rules_core/state_core.py
@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from ..ruler import StateBase
+from ..token import Token
+from ..utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it import MarkdownIt
+
+
+ class StateCore(StateBase):
+     """Mutable state shared by the core rule chain."""
+
+     def __init__(
+         self,
+         src: str,
+         md: MarkdownIt,
+         env: EnvType,
+         tokens: list[Token] | None = None,
+     ) -> None:
+         # Raw markdown source (mutated by the normalize rule).
+         self.src = src
+         self.md = md  # link to parser instance
+         # Per-parse environment sandbox (references, plugin data, ...).
+         self.env = env
+         # Output token stream, appended to by the block/inline rules.
+         self.tokens: list[Token] = tokens or []
+         # When True, the `block` rule emits a single inline token
+         # instead of running the block parser.
+         self.inlineMode = False
diff --git a/lib/markdown_it/rules_core/text_join.py b/lib/markdown_it/rules_core/text_join.py
new file mode 100644
index 0000000..5379f6d
--- /dev/null
+++ b/lib/markdown_it/rules_core/text_join.py
@@ -0,0 +1,35 @@
+"""Join raw text tokens with the rest of the text
+
+This is set as a separate rule to provide an opportunity for plugins
+to run text replacements after text join, but before escape join.
+
+For example, `\\:)` shouldn't be replaced with an emoji.
+"""
+
+from __future__ import annotations
+
+from ..token import Token
+from .state_core import StateCore
+
+
+ def text_join(state: StateCore) -> None:
+     """Join raw text for escape sequences (`text_special`) tokens with the rest of the text"""
+
+     for inline_token in state.tokens[:]:
+         if inline_token.type != "inline":
+             continue
+
+         # convert text_special to text and join all adjacent text nodes
+         new_tokens: list[Token] = []
+         for child_token in inline_token.children or []:
+             if child_token.type == "text_special":
+                 child_token.type = "text"
+             # Merge consecutive text tokens into the previous one.
+             if (
+                 child_token.type == "text"
+                 and new_tokens
+                 and new_tokens[-1].type == "text"
+             ):
+                 new_tokens[-1].content += child_token.content
+             else:
+                 new_tokens.append(child_token)
+         inline_token.children = new_tokens
diff --git a/lib/markdown_it/rules_inline/__init__.py b/lib/markdown_it/rules_inline/__init__.py
new file mode 100644
index 0000000..d82ef8f
--- /dev/null
+++ b/lib/markdown_it/rules_inline/__init__.py
@@ -0,0 +1,31 @@
+__all__ = (
+ "StateInline",
+ "autolink",
+ "backtick",
+ "emphasis",
+ "entity",
+ "escape",
+ "fragments_join",
+ "html_inline",
+ "image",
+ "link",
+ "link_pairs",
+ "linkify",
+ "newline",
+ "strikethrough",
+ "text",
+)
+from . import emphasis, strikethrough
+from .autolink import autolink
+from .backticks import backtick
+from .balance_pairs import link_pairs
+from .entity import entity
+from .escape import escape
+from .fragments_join import fragments_join
+from .html_inline import html_inline
+from .image import image
+from .link import link
+from .linkify import linkify
+from .newline import newline
+from .state_inline import StateInline
+from .text import text
diff --git a/lib/markdown_it/rules_inline/__pycache__/__init__.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..d11fe81
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/autolink.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/autolink.cpython-314.pyc
new file mode 100644
index 0000000..170c101
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/autolink.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/backticks.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/backticks.cpython-314.pyc
new file mode 100644
index 0000000..9958038
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/backticks.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-314.pyc
new file mode 100644
index 0000000..bd1ab9d
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/emphasis.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/emphasis.cpython-314.pyc
new file mode 100644
index 0000000..05eabeb
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/emphasis.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/entity.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/entity.cpython-314.pyc
new file mode 100644
index 0000000..8f1ad41
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/entity.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/escape.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/escape.cpython-314.pyc
new file mode 100644
index 0000000..1650fd2
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/escape.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/fragments_join.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/fragments_join.cpython-314.pyc
new file mode 100644
index 0000000..b7a02d6
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/fragments_join.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/html_inline.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/html_inline.cpython-314.pyc
new file mode 100644
index 0000000..cbdcc08
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/html_inline.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/image.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/image.cpython-314.pyc
new file mode 100644
index 0000000..40bd7ac
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/image.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/link.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/link.cpython-314.pyc
new file mode 100644
index 0000000..853e646
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/link.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/linkify.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/linkify.cpython-314.pyc
new file mode 100644
index 0000000..ad57602
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/linkify.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/newline.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/newline.cpython-314.pyc
new file mode 100644
index 0000000..e34886b
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/newline.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/state_inline.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/state_inline.cpython-314.pyc
new file mode 100644
index 0000000..5ba02aa
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/state_inline.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/strikethrough.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/strikethrough.cpython-314.pyc
new file mode 100644
index 0000000..4aab224
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/strikethrough.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/__pycache__/text.cpython-314.pyc b/lib/markdown_it/rules_inline/__pycache__/text.cpython-314.pyc
new file mode 100644
index 0000000..ea97dd1
Binary files /dev/null and b/lib/markdown_it/rules_inline/__pycache__/text.cpython-314.pyc differ
diff --git a/lib/markdown_it/rules_inline/autolink.py b/lib/markdown_it/rules_inline/autolink.py
new file mode 100644
index 0000000..6546e25
--- /dev/null
+++ b/lib/markdown_it/rules_inline/autolink.py
@@ -0,0 +1,77 @@
+# Process autolinks '<protocol:...>'
+import re
+
+from .state_inline import StateInline
+
+EMAIL_RE = re.compile(
+ r"^([a-zA-Z0-9.!#$%&\'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$"
+)
+AUTOLINK_RE = re.compile(r"^([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)$")
+
+
+def autolink(state: StateInline, silent: bool) -> bool:
+ pos = state.pos
+
+ if state.src[pos] != "<":
+ return False
+
+ start = state.pos
+ maximum = state.posMax
+
+ while True:
+ pos += 1
+ if pos >= maximum:
+ return False
+
+ ch = state.src[pos]
+
+ if ch == "<":
+ return False
+ if ch == ">":
+ break
+
+ url = state.src[start + 1 : pos]
+
+ if AUTOLINK_RE.search(url) is not None:
+ fullUrl = state.md.normalizeLink(url)
+ if not state.md.validateLink(fullUrl):
+ return False
+
+ if not silent:
+ token = state.push("link_open", "a", 1)
+ token.attrs = {"href": fullUrl}
+ token.markup = "autolink"
+ token.info = "auto"
+
+ token = state.push("text", "", 0)
+ token.content = state.md.normalizeLinkText(url)
+
+ token = state.push("link_close", "a", -1)
+ token.markup = "autolink"
+ token.info = "auto"
+
+ state.pos += len(url) + 2
+ return True
+
+ if EMAIL_RE.search(url) is not None:
+ fullUrl = state.md.normalizeLink("mailto:" + url)
+ if not state.md.validateLink(fullUrl):
+ return False
+
+ if not silent:
+ token = state.push("link_open", "a", 1)
+ token.attrs = {"href": fullUrl}
+ token.markup = "autolink"
+ token.info = "auto"
+
+ token = state.push("text", "", 0)
+ token.content = state.md.normalizeLinkText(url)
+
+ token = state.push("link_close", "a", -1)
+ token.markup = "autolink"
+ token.info = "auto"
+
+ state.pos += len(url) + 2
+ return True
+
+ return False
diff --git a/lib/markdown_it/rules_inline/backticks.py b/lib/markdown_it/rules_inline/backticks.py
new file mode 100644
index 0000000..fc60d6b
--- /dev/null
+++ b/lib/markdown_it/rules_inline/backticks.py
@@ -0,0 +1,72 @@
+# Parse backticks
+import re
+
+from .state_inline import StateInline
+
+regex = re.compile("^ (.+) $")
+
+
+def backtick(state: StateInline, silent: bool) -> bool:
+ pos = state.pos
+
+ if state.src[pos] != "`":
+ return False
+
+ start = pos
+ pos += 1
+ maximum = state.posMax
+
+ # scan marker length
+ while pos < maximum and (state.src[pos] == "`"):
+ pos += 1
+
+ marker = state.src[start:pos]
+ openerLength = len(marker)
+
+ if state.backticksScanned and state.backticks.get(openerLength, 0) <= start:
+ if not silent:
+ state.pending += marker
+ state.pos += openerLength
+ return True
+
+ matchStart = matchEnd = pos
+
+ # Nothing found in the cache, scan until the end of the line (or until marker is found)
+ while True:
+ try:
+ matchStart = state.src.index("`", matchEnd)
+ except ValueError:
+ break
+ matchEnd = matchStart + 1
+
+ # scan marker length
+ while matchEnd < maximum and (state.src[matchEnd] == "`"):
+ matchEnd += 1
+
+ closerLength = matchEnd - matchStart
+
+ if closerLength == openerLength:
+ # Found matching closer length.
+ if not silent:
+ token = state.push("code_inline", "code", 0)
+ token.markup = marker
+ token.content = state.src[pos:matchStart].replace("\n", " ")
+ if (
+ token.content.startswith(" ")
+ and token.content.endswith(" ")
+ and len(token.content.strip()) > 0
+ ):
+ token.content = token.content[1:-1]
+ state.pos = matchEnd
+ return True
+
+ # Some different length found, put it in cache as upper limit of where closer can be found
+ state.backticks[closerLength] = matchStart
+
+ # Scanned through the end, didn't find anything
+ state.backticksScanned = True
+
+ if not silent:
+ state.pending += marker
+ state.pos += openerLength
+ return True
diff --git a/lib/markdown_it/rules_inline/balance_pairs.py b/lib/markdown_it/rules_inline/balance_pairs.py
new file mode 100644
index 0000000..9c63b27
--- /dev/null
+++ b/lib/markdown_it/rules_inline/balance_pairs.py
@@ -0,0 +1,138 @@
+"""Balance paired characters (*, _, etc) in inline tokens."""
+
+from __future__ import annotations
+
+from .state_inline import Delimiter, StateInline
+
+
+def processDelimiters(state: StateInline, delimiters: list[Delimiter]) -> None:
+ """For each opening emphasis-like marker find a matching closing one."""
+ if not delimiters:
+ return
+
+ openersBottom = {}
+ maximum = len(delimiters)
+
+ # headerIdx is the first delimiter of the current (where closer is) delimiter run
+ headerIdx = 0
+ lastTokenIdx = -2 # needs any value lower than -1
+ jumps: list[int] = []
+ closerIdx = 0
+ while closerIdx < maximum:
+ closer = delimiters[closerIdx]
+
+ jumps.append(0)
+
+ # markers belong to same delimiter run if:
+ # - they have adjacent tokens
+ # - AND markers are the same
+ #
+ if (
+ delimiters[headerIdx].marker != closer.marker
+ or lastTokenIdx != closer.token - 1
+ ):
+ headerIdx = closerIdx
+ lastTokenIdx = closer.token
+
+ # Length is only used for emphasis-specific "rule of 3",
+ # if it's not defined (in strikethrough or 3rd party plugins),
+ # we can default it to 0 to disable those checks.
+ #
+ closer.length = closer.length or 0
+
+ if not closer.close:
+ closerIdx += 1
+ continue
+
+ # Previously calculated lower bounds (previous fails)
+ # for each marker, each delimiter length modulo 3,
+ # and for whether this closer can be an opener;
+ # https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460
+ if closer.marker not in openersBottom:
+ openersBottom[closer.marker] = [-1, -1, -1, -1, -1, -1]
+
+ minOpenerIdx = openersBottom[closer.marker][
+ (3 if closer.open else 0) + (closer.length % 3)
+ ]
+
+ openerIdx = headerIdx - jumps[headerIdx] - 1
+
+ newMinOpenerIdx = openerIdx
+
+ while openerIdx > minOpenerIdx:
+ opener = delimiters[openerIdx]
+
+ if opener.marker != closer.marker:
+ openerIdx -= jumps[openerIdx] + 1
+ continue
+
+ if opener.open and opener.end < 0:
+ isOddMatch = False
+
+ # from spec:
+ #
+ # If one of the delimiters can both open and close emphasis, then the
+ # sum of the lengths of the delimiter runs containing the opening and
+ # closing delimiters must not be a multiple of 3 unless both lengths
+ # are multiples of 3.
+ #
+ if (
+ (opener.close or closer.open)
+ and ((opener.length + closer.length) % 3 == 0)
+ and (opener.length % 3 != 0 or closer.length % 3 != 0)
+ ):
+ isOddMatch = True
+
+ if not isOddMatch:
+ # If previous delimiter cannot be an opener, we can safely skip
+ # the entire sequence in future checks. This is required to make
+ # sure algorithm has linear complexity (see *_*_*_*_*_... case).
+ #
+ if openerIdx > 0 and not delimiters[openerIdx - 1].open:
+ lastJump = jumps[openerIdx - 1] + 1
+ else:
+ lastJump = 0
+
+ jumps[closerIdx] = closerIdx - openerIdx + lastJump
+ jumps[openerIdx] = lastJump
+
+ closer.open = False
+ opener.end = closerIdx
+ opener.close = False
+ newMinOpenerIdx = -1
+
+ # treat next token as start of run,
+ # it optimizes skips in **<...>**a**<...>** pathological case
+ lastTokenIdx = -2
+
+ break
+
+ openerIdx -= jumps[openerIdx] + 1
+
+ if newMinOpenerIdx != -1:
+ # If match for this delimiter run failed, we want to set lower bound for
+ # future lookups. This is required to make sure algorithm has linear
+ # complexity.
+ #
+ # See details here:
+ # https://github.com/commonmark/cmark/issues/178#issuecomment-270417442
+ #
+ openersBottom[closer.marker][
+ (3 if closer.open else 0) + ((closer.length or 0) % 3)
+ ] = newMinOpenerIdx
+
+ closerIdx += 1
+
+
+def link_pairs(state: StateInline) -> None:
+ tokens_meta = state.tokens_meta
+ maximum = len(state.tokens_meta)
+
+ processDelimiters(state, state.delimiters)
+
+ curr = 0
+ while curr < maximum:
+ curr_meta = tokens_meta[curr]
+ if curr_meta and "delimiters" in curr_meta:
+ processDelimiters(state, curr_meta["delimiters"])
+ curr += 1
diff --git a/lib/markdown_it/rules_inline/emphasis.py b/lib/markdown_it/rules_inline/emphasis.py
new file mode 100644
index 0000000..9a98f9e
--- /dev/null
+++ b/lib/markdown_it/rules_inline/emphasis.py
@@ -0,0 +1,102 @@
+# Process *this* and _that_
+#
+from __future__ import annotations
+
+from .state_inline import Delimiter, StateInline
+
+
+def tokenize(state: StateInline, silent: bool) -> bool:
+ """Insert each marker as a separate text token, and add it to delimiter list"""
+ start = state.pos
+ marker = state.src[start]
+
+ if silent:
+ return False
+
+ if marker not in ("_", "*"):
+ return False
+
+ scanned = state.scanDelims(state.pos, marker == "*")
+
+ for _ in range(scanned.length):
+ token = state.push("text", "", 0)
+ token.content = marker
+ state.delimiters.append(
+ Delimiter(
+ marker=ord(marker),
+ length=scanned.length,
+ token=len(state.tokens) - 1,
+ end=-1,
+ open=scanned.can_open,
+ close=scanned.can_close,
+ )
+ )
+
+ state.pos += scanned.length
+
+ return True
+
+
+def _postProcess(state: StateInline, delimiters: list[Delimiter]) -> None:
+ i = len(delimiters) - 1
+ while i >= 0:
+ startDelim = delimiters[i]
+
+ # /* _ */ /* * */
+ if startDelim.marker != 0x5F and startDelim.marker != 0x2A:
+ i -= 1
+ continue
+
+ # Process only opening markers
+ if startDelim.end == -1:
+ i -= 1
+ continue
+
+ endDelim = delimiters[startDelim.end]
+
+ # If the previous delimiter has the same marker and is adjacent to this one,
+ # merge those into one strong delimiter.
+ #
+ # `<em><em>whatever</em></em>` -> `<strong>whatever</strong>`
+ #
+ isStrong = (
+ i > 0
+ and delimiters[i - 1].end == startDelim.end + 1
+ # check that first two markers match and adjacent
+ and delimiters[i - 1].marker == startDelim.marker
+ and delimiters[i - 1].token == startDelim.token - 1
+ # check that last two markers are adjacent (we can safely assume they match)
+ and delimiters[startDelim.end + 1].token == endDelim.token + 1
+ )
+
+ ch = chr(startDelim.marker)
+
+ token = state.tokens[startDelim.token]
+ token.type = "strong_open" if isStrong else "em_open"
+ token.tag = "strong" if isStrong else "em"
+ token.nesting = 1
+ token.markup = ch + ch if isStrong else ch
+ token.content = ""
+
+ token = state.tokens[endDelim.token]
+ token.type = "strong_close" if isStrong else "em_close"
+ token.tag = "strong" if isStrong else "em"
+ token.nesting = -1
+ token.markup = ch + ch if isStrong else ch
+ token.content = ""
+
+ if isStrong:
+ state.tokens[delimiters[i - 1].token].content = ""
+ state.tokens[delimiters[startDelim.end + 1].token].content = ""
+ i -= 1
+
+ i -= 1
+
+
+def postProcess(state: StateInline) -> None:
+ """Walk through delimiter list and replace text tokens with tags."""
+ _postProcess(state, state.delimiters)
+
+ for token in state.tokens_meta:
+ if token and "delimiters" in token:
+ _postProcess(state, token["delimiters"])
diff --git a/lib/markdown_it/rules_inline/entity.py b/lib/markdown_it/rules_inline/entity.py
new file mode 100644
index 0000000..ec9d396
--- /dev/null
+++ b/lib/markdown_it/rules_inline/entity.py
@@ -0,0 +1,53 @@
+# Process html entity - &#123;, &#xAF;, &quot;, ...
+import re
+
+from ..common.entities import entities
+from ..common.utils import fromCodePoint, isValidEntityCode
+from .state_inline import StateInline
+
+DIGITAL_RE = re.compile(r"^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));", re.IGNORECASE)
+NAMED_RE = re.compile(r"^&([a-z][a-z0-9]{1,31});", re.IGNORECASE)
+
+
+def entity(state: StateInline, silent: bool) -> bool:
+ pos = state.pos
+ maximum = state.posMax
+
+ if state.src[pos] != "&":
+ return False
+
+ if pos + 1 >= maximum:
+ return False
+
+ if state.src[pos + 1] == "#":
+ if match := DIGITAL_RE.search(state.src[pos:]):
+ if not silent:
+ match1 = match.group(1)
+ code = (
+ int(match1[1:], 16) if match1[0].lower() == "x" else int(match1, 10)
+ )
+
+ token = state.push("text_special", "", 0)
+ token.content = (
+ fromCodePoint(code)
+ if isValidEntityCode(code)
+ else fromCodePoint(0xFFFD)
+ )
+ token.markup = match.group(0)
+ token.info = "entity"
+
+ state.pos += len(match.group(0))
+ return True
+
+ else:
+ if (match := NAMED_RE.search(state.src[pos:])) and match.group(1) in entities:
+ if not silent:
+ token = state.push("text_special", "", 0)
+ token.content = entities[match.group(1)]
+ token.markup = match.group(0)
+ token.info = "entity"
+
+ state.pos += len(match.group(0))
+ return True
+
+ return False
diff --git a/lib/markdown_it/rules_inline/escape.py b/lib/markdown_it/rules_inline/escape.py
new file mode 100644
index 0000000..0fca6c8
--- /dev/null
+++ b/lib/markdown_it/rules_inline/escape.py
@@ -0,0 +1,93 @@
+"""
+Process escaped chars and hardbreaks
+"""
+
+from ..common.utils import isStrSpace
+from .state_inline import StateInline
+
+
+def escape(state: StateInline, silent: bool) -> bool:
+ """Process escaped chars and hardbreaks."""
+ pos = state.pos
+ maximum = state.posMax
+
+ if state.src[pos] != "\\":
+ return False
+
+ pos += 1
+
+ # '\' at the end of the inline block
+ if pos >= maximum:
+ return False
+
+ ch1 = state.src[pos]
+ ch1_ord = ord(ch1)
+ if ch1 == "\n":
+ if not silent:
+ state.push("hardbreak", "br", 0)
+ pos += 1
+ # skip leading whitespaces from next line
+ while pos < maximum:
+ ch = state.src[pos]
+ if not isStrSpace(ch):
+ break
+ pos += 1
+
+ state.pos = pos
+ return True
+
+ escapedStr = state.src[pos]
+
+ if ch1_ord >= 0xD800 and ch1_ord <= 0xDBFF and pos + 1 < maximum:
+ ch2 = state.src[pos + 1]
+ ch2_ord = ord(ch2)
+ if ch2_ord >= 0xDC00 and ch2_ord <= 0xDFFF:
+ escapedStr += ch2
+ pos += 1
+
+ origStr = "\\" + escapedStr
+
+ if not silent:
+ token = state.push("text_special", "", 0)
+ token.content = escapedStr if ch1 in _ESCAPED else origStr
+ token.markup = origStr
+ token.info = "escape"
+
+ state.pos = pos + 1
+ return True
+
+
+_ESCAPED = {
+ "!",
+ '"',
+ "#",
+ "$",
+ "%",
+ "&",
+ "'",
+ "(",
+ ")",
+ "*",
+ "+",
+ ",",
+ "-",
+ ".",
+ "/",
+ ":",
+ ";",
+ "<",
+ "=",
+ ">",
+ "?",
+ "@",
+ "[",
+ "\\",
+ "]",
+ "^",
+ "_",
+ "`",
+ "{",
+ "|",
+ "}",
+ "~",
+}
diff --git a/lib/markdown_it/rules_inline/fragments_join.py b/lib/markdown_it/rules_inline/fragments_join.py
new file mode 100644
index 0000000..f795c13
--- /dev/null
+++ b/lib/markdown_it/rules_inline/fragments_join.py
@@ -0,0 +1,43 @@
+from .state_inline import StateInline
+
+
+def fragments_join(state: StateInline) -> None:
+ """
+ Clean up tokens after emphasis and strikethrough postprocessing:
+ merge adjacent text nodes into one and re-calculate all token levels
+
+ This is necessary because initially emphasis delimiter markers (``*, _, ~``)
+ are treated as their own separate text tokens. Then emphasis rule either
+ leaves them as text (needed to merge with adjacent text) or turns them
+ into opening/closing tags (which messes up levels inside).
+ """
+ level = 0
+ maximum = len(state.tokens)
+
+ curr = last = 0
+ while curr < maximum:
+ # re-calculate levels after emphasis/strikethrough turns some text nodes
+ # into opening/closing tags
+ if state.tokens[curr].nesting < 0:
+ level -= 1 # closing tag
+ state.tokens[curr].level = level
+ if state.tokens[curr].nesting > 0:
+ level += 1 # opening tag
+
+ if (
+ state.tokens[curr].type == "text"
+ and curr + 1 < maximum
+ and state.tokens[curr + 1].type == "text"
+ ):
+ # collapse two adjacent text nodes
+ state.tokens[curr + 1].content = (
+ state.tokens[curr].content + state.tokens[curr + 1].content
+ )
+ else:
+ if curr != last:
+ state.tokens[last] = state.tokens[curr]
+ last += 1
+ curr += 1
+
+ if curr != last:
+ del state.tokens[last:]
diff --git a/lib/markdown_it/rules_inline/html_inline.py b/lib/markdown_it/rules_inline/html_inline.py
new file mode 100644
index 0000000..9065e1d
--- /dev/null
+++ b/lib/markdown_it/rules_inline/html_inline.py
@@ -0,0 +1,43 @@
+# Process html tags
+from ..common.html_re import HTML_TAG_RE
+from ..common.utils import isLinkClose, isLinkOpen
+from .state_inline import StateInline
+
+
+def isLetter(ch: int) -> bool:
+ lc = ch | 0x20 # to lower case
+ # /* a */ and /* z */
+ return (lc >= 0x61) and (lc <= 0x7A)
+
+
+def html_inline(state: StateInline, silent: bool) -> bool:
+ pos = state.pos
+
+ if not state.md.options.get("html", None):
+ return False
+
+ # Check start
+ maximum = state.posMax
+ if state.src[pos] != "<" or pos + 2 >= maximum:
+ return False
+
+ # Quick fail on second char
+ ch = state.src[pos + 1]
+ if ch not in ("!", "?", "/") and not isLetter(ord(ch)): # /* / */
+ return False
+
+ match = HTML_TAG_RE.search(state.src[pos:])
+ if not match:
+ return False
+
+ if not silent:
+ token = state.push("html_inline", "", 0)
+ token.content = state.src[pos : pos + len(match.group(0))]
+
+ if isLinkOpen(token.content):
+ state.linkLevel += 1
+ if isLinkClose(token.content):
+ state.linkLevel -= 1
+
+ state.pos += len(match.group(0))
+ return True
diff --git a/lib/markdown_it/rules_inline/image.py b/lib/markdown_it/rules_inline/image.py
new file mode 100644
index 0000000..005105b
--- /dev/null
+++ b/lib/markdown_it/rules_inline/image.py
@@ -0,0 +1,148 @@
+# Process ![image](<src> "title")
+from __future__ import annotations
+
+from ..common.utils import isStrSpace, normalizeReference
+from ..token import Token
+from .state_inline import StateInline
+
+
+def image(state: StateInline, silent: bool) -> bool:
+ label = None
+ href = ""
+ oldPos = state.pos
+ max = state.posMax
+
+ if state.src[state.pos] != "!":
+ return False
+
+ if state.pos + 1 < state.posMax and state.src[state.pos + 1] != "[":
+ return False
+
+ labelStart = state.pos + 2
+ labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, False)
+
+ # parser failed to find ']', so it's not a valid link
+ if labelEnd < 0:
+ return False
+
+ pos = labelEnd + 1
+
+ if pos < max and state.src[pos] == "(":
+ #
+ # Inline link
+ #
+
+ # [link](  <href>  "title"  )
+ # ^^ skipping these spaces
+ pos += 1
+ while pos < max:
+ ch = state.src[pos]
+ if not isStrSpace(ch) and ch != "\n":
+ break
+ pos += 1
+
+ if pos >= max:
+ return False
+
+ # [link](  <href>  "title"  )
+ # ^^^^^^ parsing link destination
+ start = pos
+ res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax)
+ if res.ok:
+ href = state.md.normalizeLink(res.str)
+ if state.md.validateLink(href):
+ pos = res.pos
+ else:
+ href = ""
+
+ # [link](  <href>  "title"  )
+ # ^^ skipping these spaces
+ start = pos
+ while pos < max:
+ ch = state.src[pos]
+ if not isStrSpace(ch) and ch != "\n":
+ break
+ pos += 1
+
+ # [link](  <href>  "title"  )
+ # ^^^^^^^ parsing link title
+ res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax, None)
+ if pos < max and start != pos and res.ok:
+ title = res.str
+ pos = res.pos
+
+ # [link](  <href>  "title"  )
+ # ^^ skipping these spaces
+ while pos < max:
+ ch = state.src[pos]
+ if not isStrSpace(ch) and ch != "\n":
+ break
+ pos += 1
+ else:
+ title = ""
+
+ if pos >= max or state.src[pos] != ")":
+ state.pos = oldPos
+ return False
+
+ pos += 1
+
+ else:
+ #
+ # Link reference
+ #
+ if "references" not in state.env:
+ return False
+
+ # /* [ */
+ if pos < max and state.src[pos] == "[":
+ start = pos + 1
+ pos = state.md.helpers.parseLinkLabel(state, pos)
+ if pos >= 0:
+ label = state.src[start:pos]
+ pos += 1
+ else:
+ pos = labelEnd + 1
+ else:
+ pos = labelEnd + 1
+
+ # covers label == '' and label == undefined
+ # (collapsed reference link and shortcut reference link respectively)
+ if not label:
+ label = state.src[labelStart:labelEnd]
+
+ label = normalizeReference(label)
+
+ ref = state.env["references"].get(label, None)
+ if not ref:
+ state.pos = oldPos
+ return False
+
+ href = ref["href"]
+ title = ref["title"]
+
+ #
+ # We found the end of the link, and know for a fact it's a valid link
+ # so all that's left to do is to call tokenizer.
+ #
+ if not silent:
+ content = state.src[labelStart:labelEnd]
+
+ tokens: list[Token] = []
+ state.md.inline.parse(content, state.md, state.env, tokens)
+
+ token = state.push("image", "img", 0)
+ token.attrs = {"src": href, "alt": ""}
+ token.children = tokens or None
+ token.content = content
+
+ if title:
+ token.attrSet("title", title)
+
+ # note, this is not part of markdown-it JS, but is useful for renderers
+ if label and state.md.options.get("store_labels", False):
+ token.meta["label"] = label
+
+ state.pos = pos
+ state.posMax = max
+ return True
diff --git a/lib/markdown_it/rules_inline/link.py b/lib/markdown_it/rules_inline/link.py
new file mode 100644
index 0000000..2e92c7d
--- /dev/null
+++ b/lib/markdown_it/rules_inline/link.py
@@ -0,0 +1,149 @@
+# Process [link](<to> "stuff")
+
+from ..common.utils import isStrSpace, normalizeReference
+from .state_inline import StateInline
+
+
+def link(state: StateInline, silent: bool) -> bool:
+ href = ""
+ title = ""
+ label = None
+ oldPos = state.pos
+ maximum = state.posMax
+ start = state.pos
+ parseReference = True
+
+ if state.src[state.pos] != "[":
+ return False
+
+ labelStart = state.pos + 1
+ labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, True)
+
+ # parser failed to find ']', so it's not a valid link
+ if labelEnd < 0:
+ return False
+
+ pos = labelEnd + 1
+
+ if pos < maximum and state.src[pos] == "(":
+ #
+ # Inline link
+ #
+
+ # might have found a valid shortcut link, disable reference parsing
+ parseReference = False
+
+ # [link](  <href>  "title"  )
+ # ^^ skipping these spaces
+ pos += 1
+ while pos < maximum:
+ ch = state.src[pos]
+ if not isStrSpace(ch) and ch != "\n":
+ break
+ pos += 1
+
+ if pos >= maximum:
+ return False
+
+ # [link](  <href>  "title"  )
+ # ^^^^^^ parsing link destination
+ start = pos
+ res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax)
+ if res.ok:
+ href = state.md.normalizeLink(res.str)
+ if state.md.validateLink(href):
+ pos = res.pos
+ else:
+ href = ""
+
+ # [link](  <href>  "title"  )
+ # ^^ skipping these spaces
+ start = pos
+ while pos < maximum:
+ ch = state.src[pos]
+ if not isStrSpace(ch) and ch != "\n":
+ break
+ pos += 1
+
+ # [link](  <href>  "title"  )
+ # ^^^^^^^ parsing link title
+ res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax)
+ if pos < maximum and start != pos and res.ok:
+ title = res.str
+ pos = res.pos
+
+ # [link](  <href>  "title"  )
+ # ^^ skipping these spaces
+ while pos < maximum:
+ ch = state.src[pos]
+ if not isStrSpace(ch) and ch != "\n":
+ break
+ pos += 1
+
+ if pos >= maximum or state.src[pos] != ")":
+ # parsing a valid shortcut link failed, fallback to reference
+ parseReference = True
+
+ pos += 1
+
+ if parseReference:
+ #
+ # Link reference
+ #
+ if "references" not in state.env:
+ return False
+
+ if pos < maximum and state.src[pos] == "[":
+ start = pos + 1
+ pos = state.md.helpers.parseLinkLabel(state, pos)
+ if pos >= 0:
+ label = state.src[start:pos]
+ pos += 1
+ else:
+ pos = labelEnd + 1
+
+ else:
+ pos = labelEnd + 1
+
+ # covers label == '' and label == undefined
+ # (collapsed reference link and shortcut reference link respectively)
+ if not label:
+ label = state.src[labelStart:labelEnd]
+
+ label = normalizeReference(label)
+
+ ref = state.env["references"].get(label, None)
+ if not ref:
+ state.pos = oldPos
+ return False
+
+ href = ref["href"]
+ title = ref["title"]
+
+ #
+ # We found the end of the link, and know for a fact it's a valid link
+ # so all that's left to do is to call tokenizer.
+ #
+ if not silent:
+ state.pos = labelStart
+ state.posMax = labelEnd
+
+ token = state.push("link_open", "a", 1)
+ token.attrs = {"href": href}
+
+ if title:
+ token.attrSet("title", title)
+
+ # note, this is not part of markdown-it JS, but is useful for renderers
+ if label and state.md.options.get("store_labels", False):
+ token.meta["label"] = label
+
+ state.linkLevel += 1
+ state.md.inline.tokenize(state)
+ state.linkLevel -= 1
+
+ token = state.push("link_close", "a", -1)
+
+ state.pos = pos
+ state.posMax = maximum
+ return True
diff --git a/lib/markdown_it/rules_inline/linkify.py b/lib/markdown_it/rules_inline/linkify.py
new file mode 100644
index 0000000..3669396
--- /dev/null
+++ b/lib/markdown_it/rules_inline/linkify.py
@@ -0,0 +1,62 @@
+"""Process links like https://example.org/"""
+
+import re
+
+from .state_inline import StateInline
+
+# RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
+SCHEME_RE = re.compile(r"(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$", re.IGNORECASE)
+
+
+def linkify(state: StateInline, silent: bool) -> bool:
+ """Rule for identifying plain-text links."""
+ if not state.md.options.linkify:
+ return False
+ if state.linkLevel > 0:
+ return False
+ if not state.md.linkify:
+ raise ModuleNotFoundError("Linkify enabled but not installed.")
+
+ pos = state.pos
+ maximum = state.posMax
+
+ if (
+ (pos + 3) > maximum
+ or state.src[pos] != ":"
+ or state.src[pos + 1] != "/"
+ or state.src[pos + 2] != "/"
+ ):
+ return False
+
+ if not (match := SCHEME_RE.search(state.pending)):
+ return False
+
+ proto = match.group(1)
+ if not (link := state.md.linkify.match_at_start(state.src[pos - len(proto) :])):
+ return False
+ url: str = link.url
+
+ # disallow '*' at the end of the link (conflicts with emphasis)
+ url = url.rstrip("*")
+
+ full_url = state.md.normalizeLink(url)
+ if not state.md.validateLink(full_url):
+ return False
+
+ if not silent:
+ state.pending = state.pending[: -len(proto)]
+
+ token = state.push("link_open", "a", 1)
+ token.attrs = {"href": full_url}
+ token.markup = "linkify"
+ token.info = "auto"
+
+ token = state.push("text", "", 0)
+ token.content = state.md.normalizeLinkText(url)
+
+ token = state.push("link_close", "a", -1)
+ token.markup = "linkify"
+ token.info = "auto"
+
+ state.pos += len(url) - len(proto)
+ return True
diff --git a/lib/markdown_it/rules_inline/newline.py b/lib/markdown_it/rules_inline/newline.py
new file mode 100644
index 0000000..d05ee6d
--- /dev/null
+++ b/lib/markdown_it/rules_inline/newline.py
@@ -0,0 +1,44 @@
+"""Proceess '\n'."""
+
+from ..common.utils import charStrAt, isStrSpace
+from .state_inline import StateInline
+
+
+def newline(state: StateInline, silent: bool) -> bool:
+ pos = state.pos
+
+ if state.src[pos] != "\n":
+ return False
+
+ pmax = len(state.pending) - 1
+ maximum = state.posMax
+
+ # ' \n' -> hardbreak
+ # Lookup in pending chars is bad practice! Don't copy to other rules!
+ # Pending string is stored in concat mode, indexed lookups will cause
+ # conversion to flat mode.
+ if not silent:
+ if pmax >= 0 and charStrAt(state.pending, pmax) == " ":
+ if pmax >= 1 and charStrAt(state.pending, pmax - 1) == " ":
+ # Find whitespaces tail of pending chars.
+ ws = pmax - 1
+ while ws >= 1 and charStrAt(state.pending, ws - 1) == " ":
+ ws -= 1
+ state.pending = state.pending[:ws]
+
+ state.push("hardbreak", "br", 0)
+ else:
+ state.pending = state.pending[:-1]
+ state.push("softbreak", "br", 0)
+
+ else:
+ state.push("softbreak", "br", 0)
+
+ pos += 1
+
+ # skip heading spaces for next line
+ while pos < maximum and isStrSpace(state.src[pos]):
+ pos += 1
+
+ state.pos = pos
+ return True
diff --git a/lib/markdown_it/rules_inline/state_inline.py b/lib/markdown_it/rules_inline/state_inline.py
new file mode 100644
index 0000000..50dc412
--- /dev/null
+++ b/lib/markdown_it/rules_inline/state_inline.py
@@ -0,0 +1,165 @@
+from __future__ import annotations
+
+from collections import namedtuple
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Literal
+
+from ..common.utils import isMdAsciiPunct, isPunctChar, isWhiteSpace
+from ..ruler import StateBase
+from ..token import Token
+from ..utils import EnvType
+
+if TYPE_CHECKING:
+ from markdown_it import MarkdownIt
+
+
+@dataclass(slots=True)
+class Delimiter:
+ # Char code of the starting marker (number).
+ marker: int
+
+ # Total length of these series of delimiters.
+ length: int
+
+ # A position of the token this delimiter corresponds to.
+ token: int
+
+ # If this delimiter is matched as a valid opener, `end` will be
+ # equal to its position, otherwise it's `-1`.
+ end: int
+
+ # Boolean flags that determine if this delimiter could open or close
+ # an emphasis.
+ open: bool
+ close: bool
+
+ level: bool | None = None
+
+
+Scanned = namedtuple("Scanned", ["can_open", "can_close", "length"])
+
+
+class StateInline(StateBase):
+ def __init__(
+ self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token]
+ ) -> None:
+ self.src = src
+ self.env = env
+ self.md = md
+ self.tokens = outTokens
+ self.tokens_meta: list[dict[str, Any] | None] = [None] * len(outTokens)
+
+ self.pos = 0
+ self.posMax = len(self.src)
+ self.level = 0
+ self.pending = ""
+ self.pendingLevel = 0
+
+ # Stores { start: end } pairs. Useful for backtrack
+ # optimization of pairs parse (emphasis, strikes).
+ self.cache: dict[int, int] = {}
+
+ # List of emphasis-like delimiters for current tag
+ self.delimiters: list[Delimiter] = []
+
+ # Stack of delimiter lists for upper level tags
+ self._prev_delimiters: list[list[Delimiter]] = []
+
+ # backticklength => last seen position
+ self.backticks: dict[int, int] = {}
+ self.backticksScanned = False
+
+ # Counter used to disable inline linkify-it execution
+ # inside <a> and markdown links
+ self.linkLevel = 0
+
+ def __repr__(self) -> str:
+ return (
+ f"{self.__class__.__name__}"
+ f"(pos=[{self.pos} of {self.posMax}], token={len(self.tokens)})"
+ )
+
+ def pushPending(self) -> Token:
+ token = Token("text", "", 0)
+ token.content = self.pending
+ token.level = self.pendingLevel
+ self.tokens.append(token)
+ self.pending = ""
+ return token
+
+ def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token:
+ """Push new token to "stream".
+ If pending text exists - flush it as text token
+ """
+ if self.pending:
+ self.pushPending()
+
+ token = Token(ttype, tag, nesting)
+ token_meta = None
+
+ if nesting < 0:
+ # closing tag
+ self.level -= 1
+ self.delimiters = self._prev_delimiters.pop()
+
+ token.level = self.level
+
+ if nesting > 0:
+ # opening tag
+ self.level += 1
+ self._prev_delimiters.append(self.delimiters)
+ self.delimiters = []
+ token_meta = {"delimiters": self.delimiters}
+
+ self.pendingLevel = self.level
+ self.tokens.append(token)
+ self.tokens_meta.append(token_meta)
+ return token
+
+ def scanDelims(self, start: int, canSplitWord: bool) -> Scanned:
+ """
+ Scan a sequence of emphasis-like markers, and determine whether
+ it can start an emphasis sequence or end an emphasis sequence.
+
+ - start - position to scan from (it should point at a valid marker);
+ - canSplitWord - determine if these markers can be found inside a word
+
+ """
+ pos = start
+ maximum = self.posMax
+ marker = self.src[start]
+
+ # treat beginning of the line as a whitespace
+ lastChar = self.src[start - 1] if start > 0 else " "
+
+ while pos < maximum and self.src[pos] == marker:
+ pos += 1
+
+ count = pos - start
+
+ # treat end of the line as a whitespace
+ nextChar = self.src[pos] if pos < maximum else " "
+
+ isLastPunctChar = isMdAsciiPunct(ord(lastChar)) or isPunctChar(lastChar)
+ isNextPunctChar = isMdAsciiPunct(ord(nextChar)) or isPunctChar(nextChar)
+
+ isLastWhiteSpace = isWhiteSpace(ord(lastChar))
+ isNextWhiteSpace = isWhiteSpace(ord(nextChar))
+
+ left_flanking = not (
+ isNextWhiteSpace
+ or (isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar))
+ )
+ right_flanking = not (
+ isLastWhiteSpace
+ or (isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar))
+ )
+
+ can_open = left_flanking and (
+ canSplitWord or (not right_flanking) or isLastPunctChar
+ )
+ can_close = right_flanking and (
+ canSplitWord or (not left_flanking) or isNextPunctChar
+ )
+
+ return Scanned(can_open, can_close, count)
diff --git a/lib/markdown_it/rules_inline/strikethrough.py b/lib/markdown_it/rules_inline/strikethrough.py
new file mode 100644
index 0000000..ec81628
--- /dev/null
+++ b/lib/markdown_it/rules_inline/strikethrough.py
@@ -0,0 +1,127 @@
+# ~~strike through~~
+from __future__ import annotations
+
+from .state_inline import Delimiter, StateInline
+
+
+def tokenize(state: StateInline, silent: bool) -> bool:
+ """Insert each marker as a separate text token, and add it to delimiter list"""
+ start = state.pos
+ ch = state.src[start]
+
+ if silent:
+ return False
+
+ if ch != "~":
+ return False
+
+ scanned = state.scanDelims(state.pos, True)
+ length = scanned.length
+
+ if length < 2:
+ return False
+
+ if length % 2:
+ token = state.push("text", "", 0)
+ token.content = ch
+ length -= 1
+
+ i = 0
+ while i < length:
+ token = state.push("text", "", 0)
+ token.content = ch + ch
+ state.delimiters.append(
+ Delimiter(
+ marker=ord(ch),
+ length=0, # disable "rule of 3" length checks meant for emphasis
+ token=len(state.tokens) - 1,
+ end=-1,
+ open=scanned.can_open,
+ close=scanned.can_close,
+ )
+ )
+
+ i += 2
+
+ state.pos += scanned.length
+
+ return True
+
+
+def _postProcess(state: StateInline, delimiters: list[Delimiter]) -> None:
+ loneMarkers = []
+ maximum = len(delimiters)
+
+ i = 0
+ while i < maximum:
+ startDelim = delimiters[i]
+
+ if startDelim.marker != 0x7E: # /* ~ */
+ i += 1
+ continue
+
+ if startDelim.end == -1:
+ i += 1
+ continue
+
+ endDelim = delimiters[startDelim.end]
+
+ token = state.tokens[startDelim.token]
+ token.type = "s_open"
+ token.tag = "s"
+ token.nesting = 1
+ token.markup = "~~"
+ token.content = ""
+
+ token = state.tokens[endDelim.token]
+ token.type = "s_close"
+ token.tag = "s"
+ token.nesting = -1
+ token.markup = "~~"
+ token.content = ""
+
+ if (
+ state.tokens[endDelim.token - 1].type == "text"
+ and state.tokens[endDelim.token - 1].content == "~"
+ ):
+ loneMarkers.append(endDelim.token - 1)
+
+ i += 1
+
+ # If a marker sequence has an odd number of characters, it's split
+ # like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the
+ # start of the sequence.
+ #
+ # So, we have to move all those markers after subsequent s_close tags.
+ #
+ while loneMarkers:
+ i = loneMarkers.pop()
+ j = i + 1
+
+ while (j < len(state.tokens)) and (state.tokens[j].type == "s_close"):
+ j += 1
+
+ j -= 1
+
+ if i != j:
+ token = state.tokens[j]
+ state.tokens[j] = state.tokens[i]
+ state.tokens[i] = token
+
+
+def postProcess(state: StateInline) -> None:
+ """Walk through delimiter list and replace text tokens with tags."""
+ tokens_meta = state.tokens_meta
+ maximum = len(state.tokens_meta)
+ _postProcess(state, state.delimiters)
+
+ curr = 0
+ while curr < maximum:
+ try:
+ curr_meta = tokens_meta[curr]
+ except IndexError:
+ pass
+ else:
+ if curr_meta and "delimiters" in curr_meta:
+ _postProcess(state, curr_meta["delimiters"])
+ curr += 1
diff --git a/lib/markdown_it/rules_inline/text.py b/lib/markdown_it/rules_inline/text.py
new file mode 100644
index 0000000..18b2fcc
--- /dev/null
+++ b/lib/markdown_it/rules_inline/text.py
@@ -0,0 +1,62 @@
+import functools
+import re
+
+# Skip text characters for text token, place those to pending buffer
+# and increment current pos
+from .state_inline import StateInline
+
+# Rule to skip pure text
+# '{}$%@~+=:' reserved for extensions
+
+# !!!! Don't confuse with "Markdown ASCII Punctuation" chars
+# http://spec.commonmark.org/0.15/#ascii-punctuation-character
+
+
+_TerminatorChars = {
+ "\n",
+ "!",
+ "#",
+ "$",
+ "%",
+ "&",
+ "*",
+ "+",
+ "-",
+ ":",
+ "<",
+ "=",
+ ">",
+ "@",
+ "[",
+ "\\",
+ "]",
+ "^",
+ "_",
+ "`",
+ "{",
+ "}",
+ "~",
+}
+
+
+@functools.cache
+def _terminator_char_regex() -> re.Pattern[str]:
+ return re.compile("[" + re.escape("".join(_TerminatorChars)) + "]")
+
+
+def text(state: StateInline, silent: bool) -> bool:
+ pos = state.pos
+ posMax = state.posMax
+
+ terminator_char = _terminator_char_regex().search(state.src, pos)
+ pos = terminator_char.start() if terminator_char else posMax
+
+ if pos == state.pos:
+ return False
+
+ if not silent:
+ state.pending += state.src[state.pos : pos]
+
+ state.pos = pos
+
+ return True
diff --git a/lib/markdown_it/token.py b/lib/markdown_it/token.py
new file mode 100644
index 0000000..d6d0b45
--- /dev/null
+++ b/lib/markdown_it/token.py
@@ -0,0 +1,178 @@
+from __future__ import annotations
+
+from collections.abc import Callable, MutableMapping
+import dataclasses as dc
+from typing import Any, Literal
+import warnings
+
+
+def convert_attrs(value: Any) -> Any:
+ """Convert Token.attrs set as ``None`` or ``[[key, value], ...]`` to a dict.
+
+ This improves compatibility with upstream markdown-it.
+ """
+ if not value:
+ return {}
+ if isinstance(value, list):
+ return dict(value)
+ return value
+
+
+@dc.dataclass(slots=True)
+class Token:
+ type: str
+ """Type of the token (string, e.g. "paragraph_open")"""
+
+ tag: str
+ """HTML tag name, e.g. 'p'"""
+
+ nesting: Literal[-1, 0, 1]
+ """Level change (number in {-1, 0, 1} set), where:
+ - `1` means the tag is opening
+ - `0` means the tag is self-closing
+ - `-1` means the tag is closing
+ """
+
+ attrs: dict[str, str | int | float] = dc.field(default_factory=dict)
+ """HTML attributes.
+ Note this differs from the upstream "list of lists" format,
+ although an instance can still be initialised with this format.
+ """
+
+ map: list[int] | None = None
+ """Source map info. Format: `[ line_begin, line_end ]`"""
+
+ level: int = 0
+ """Nesting level, the same as `state.level`"""
+
+ children: list[Token] | None = None
+ """Array of child nodes (inline and img tokens)."""
+
+ content: str = ""
+ """Inner content, in the case of a self-closing tag (code, html, fence, etc.)."""
+
+ markup: str = ""
+ """'*' or '_' for emphasis, fence string for fence, etc."""
+
+ info: str = ""
+ """Additional information:
+ - Info string for "fence" tokens
+ - The value "auto" for autolink "link_open" and "link_close" tokens
+ - The string value of the item marker for ordered-list "list_item_open" tokens
+ """
+
+ meta: dict[Any, Any] = dc.field(default_factory=dict)
+ """A place for plugins to store any arbitrary data"""
+
+ block: bool = False
+ """True for block-level tokens, false for inline tokens.
+ Used in renderer to calculate line breaks
+ """
+
+ hidden: bool = False
+ """If true, ignore this element when rendering.
+ Used for tight lists to hide paragraphs.
+ """
+
+ def __post_init__(self) -> None:
+ self.attrs = convert_attrs(self.attrs)
+
+ def attrIndex(self, name: str) -> int:
+ warnings.warn( # noqa: B028
+ "Token.attrIndex should not be used, since Token.attrs is a dictionary",
+ UserWarning,
+ )
+ if name not in self.attrs:
+ return -1
+ return list(self.attrs.keys()).index(name)
+
+ def attrItems(self) -> list[tuple[str, str | int | float]]:
+ """Get (key, value) list of attrs."""
+ return list(self.attrs.items())
+
+ def attrPush(self, attrData: tuple[str, str | int | float]) -> None:
+ """Add `[ name, value ]` attribute to list. Init attrs if necessary."""
+ name, value = attrData
+ self.attrSet(name, value)
+
+ def attrSet(self, name: str, value: str | int | float) -> None:
+ """Set `name` attribute to `value`. Override old value if exists."""
+ self.attrs[name] = value
+
+ def attrGet(self, name: str) -> None | str | int | float:
+ """Get the value of attribute `name`, or null if it does not exist."""
+ return self.attrs.get(name, None)
+
+ def attrJoin(self, name: str, value: str) -> None:
+ """Join value to existing attribute via space.
+ Or create new attribute if not exists.
+ Useful to operate with token classes.
+ """
+ if name in self.attrs:
+ current = self.attrs[name]
+ if not isinstance(current, str):
+ raise TypeError(
+ f"existing attr 'name' is not a str: {self.attrs[name]}"
+ )
+ self.attrs[name] = f"{current} {value}"
+ else:
+ self.attrs[name] = value
+
+ def copy(self, **changes: Any) -> Token:
+ """Return a shallow copy of the instance."""
+ return dc.replace(self, **changes)
+
+ def as_dict(
+ self,
+ *,
+ children: bool = True,
+ as_upstream: bool = True,
+ meta_serializer: Callable[[dict[Any, Any]], Any] | None = None,
+ filter: Callable[[str, Any], bool] | None = None,
+ dict_factory: Callable[..., MutableMapping[str, Any]] = dict,
+ ) -> MutableMapping[str, Any]:
+ """Return the token as a dictionary.
+
+ :param children: Also convert children to dicts
+ :param as_upstream: Ensure the output dictionary is equal to that created by markdown-it
+ For example, attrs are converted to null or lists
+ :param meta_serializer: hook for serializing ``Token.meta``
+ :param filter: A callable whose return code determines whether an
+ attribute or element is included (``True``) or dropped (``False``).
+ Is called with the (key, value) pair.
+ :param dict_factory: A callable to produce dictionaries from.
+ For example, to produce ordered dictionaries instead of normal Python
+ dictionaries, pass in ``collections.OrderedDict``.
+
+ """
+ mapping = dict_factory((f.name, getattr(self, f.name)) for f in dc.fields(self))
+ if filter:
+ mapping = dict_factory((k, v) for k, v in mapping.items() if filter(k, v))
+ if as_upstream and "attrs" in mapping:
+ mapping["attrs"] = (
+ None
+ if not mapping["attrs"]
+ else [[k, v] for k, v in mapping["attrs"].items()]
+ )
+ if meta_serializer and "meta" in mapping:
+ mapping["meta"] = meta_serializer(mapping["meta"])
+ if children and mapping.get("children", None):
+ mapping["children"] = [
+ child.as_dict(
+ children=children,
+ filter=filter,
+ dict_factory=dict_factory,
+ as_upstream=as_upstream,
+ meta_serializer=meta_serializer,
+ )
+ for child in mapping["children"]
+ ]
+ return mapping
+
+ @classmethod
+ def from_dict(cls, dct: MutableMapping[str, Any]) -> Token:
+ """Convert a dict to a Token."""
+ token = cls(**dct)
+ if token.children:
+ token.children = [cls.from_dict(c) for c in token.children] # type: ignore[arg-type]
+ return token
diff --git a/lib/markdown_it/tree.py b/lib/markdown_it/tree.py
new file mode 100644
index 0000000..5369157
--- /dev/null
+++ b/lib/markdown_it/tree.py
@@ -0,0 +1,333 @@
+"""A tree representation of a linear markdown-it token stream.
+
+This module is not part of upstream JavaScript markdown-it.
+"""
+
+from __future__ import annotations
+
+from collections.abc import Generator, Sequence
+import textwrap
+from typing import Any, NamedTuple, TypeVar, overload
+
+from .token import Token
+
+
+class _NesterTokens(NamedTuple):
+ opening: Token
+ closing: Token
+
+
+_NodeType = TypeVar("_NodeType", bound="SyntaxTreeNode")
+
+
+class SyntaxTreeNode:
+ """A Markdown syntax tree node.
+
+ A class that can be used to construct a tree representation of a linear
+ `markdown-it-py` token stream.
+
+ Each node in the tree represents either:
+ - root of the Markdown document
+ - a single unnested `Token`
+ - a `Token` "_open" and "_close" token pair, and the tokens nested in
+ between
+ """
+
+ def __init__(
+ self, tokens: Sequence[Token] = (), *, create_root: bool = True
+ ) -> None:
+ """Initialize a `SyntaxTreeNode` from a token stream.
+
+ If `create_root` is True, create a root node for the document.
+ """
+ # Only nodes representing an unnested token have self.token
+ self.token: Token | None = None
+
+ # Only containers have nester tokens
+ self.nester_tokens: _NesterTokens | None = None
+
+ # Root node does not have self.parent
+ self._parent: Any = None
+
+ # Empty list unless a non-empty container, or unnested token that has
+ # children (i.e. inline or img)
+ self._children: list[Any] = []
+
+ if create_root:
+ self._set_children_from_tokens(tokens)
+ return
+
+ if not tokens:
+ raise ValueError(
+ "Can only create root from empty token sequence."
+ " Set `create_root=True`."
+ )
+ elif len(tokens) == 1:
+ inline_token = tokens[0]
+ if inline_token.nesting:
+ raise ValueError(
+ "Unequal nesting level at the start and end of token stream."
+ )
+ self.token = inline_token
+ if inline_token.children:
+ self._set_children_from_tokens(inline_token.children)
+ else:
+ self.nester_tokens = _NesterTokens(tokens[0], tokens[-1])
+ self._set_children_from_tokens(tokens[1:-1])
+
+ def __repr__(self) -> str:
+ return f"{type(self).__name__}({self.type})"
+
+ @overload
+ def __getitem__(self: _NodeType, item: int) -> _NodeType: ...
+
+ @overload
+ def __getitem__(self: _NodeType, item: slice) -> list[_NodeType]: ...
+
+ def __getitem__(self: _NodeType, item: int | slice) -> _NodeType | list[_NodeType]:
+ return self.children[item]
+
+ def to_tokens(self: _NodeType) -> list[Token]:
+ """Recover the linear token stream."""
+
+ def recursive_collect_tokens(node: _NodeType, token_list: list[Token]) -> None:
+ if node.type == "root":
+ for child in node.children:
+ recursive_collect_tokens(child, token_list)
+ elif node.token:
+ token_list.append(node.token)
+ else:
+ assert node.nester_tokens
+ token_list.append(node.nester_tokens.opening)
+ for child in node.children:
+ recursive_collect_tokens(child, token_list)
+ token_list.append(node.nester_tokens.closing)
+
+ tokens: list[Token] = []
+ recursive_collect_tokens(self, tokens)
+ return tokens
+
+ @property
+ def children(self: _NodeType) -> list[_NodeType]:
+ return self._children
+
+ @children.setter
+ def children(self: _NodeType, value: list[_NodeType]) -> None:
+ self._children = value
+
+ @property
+ def parent(self: _NodeType) -> _NodeType | None:
+ return self._parent # type: ignore
+
+ @parent.setter
+ def parent(self: _NodeType, value: _NodeType | None) -> None:
+ self._parent = value
+
+ @property
+ def is_root(self) -> bool:
+ """Is the node a special root node?"""
+ return not (self.token or self.nester_tokens)
+
+ @property
+ def is_nested(self) -> bool:
+ """Is this node nested?
+
+ Returns `True` if the node represents a `Token` pair and tokens in the
+ sequence between them, where `Token.nesting` of the first `Token` in
+ the pair is 1 and nesting of the other `Token` is -1.
+ """
+ return bool(self.nester_tokens)
+
+ @property
+ def siblings(self: _NodeType) -> Sequence[_NodeType]:
+ """Get siblings of the node.
+
+ Gets the whole group of siblings, including self.
+ """
+ if not self.parent:
+ return [self]
+ return self.parent.children
+
+ @property
+ def type(self) -> str:
+ """Get a string type of the represented syntax.
+
+ - "root" for root nodes
+ - `Token.type` if the node represents an unnested token
+ - `Token.type` of the opening token, with "_open" suffix stripped, if
+ the node represents a nester token pair
+ """
+ if self.is_root:
+ return "root"
+ if self.token:
+ return self.token.type
+ assert self.nester_tokens
+ return self.nester_tokens.opening.type.removesuffix("_open")
+
+ @property
+ def next_sibling(self: _NodeType) -> _NodeType | None:
+ """Get the next node in the sequence of siblings.
+
+ Returns `None` if this is the last sibling.
+ """
+ self_index = self.siblings.index(self)
+ if self_index + 1 < len(self.siblings):
+ return self.siblings[self_index + 1]
+ return None
+
+ @property
+ def previous_sibling(self: _NodeType) -> _NodeType | None:
+ """Get the previous node in the sequence of siblings.
+
+ Returns `None` if this is the first sibling.
+ """
+ self_index = self.siblings.index(self)
+ if self_index - 1 >= 0:
+ return self.siblings[self_index - 1]
+ return None
+
+ def _add_child(
+ self,
+ tokens: Sequence[Token],
+ ) -> None:
+ """Make a child node for `self`."""
+ child = type(self)(tokens, create_root=False)
+ child.parent = self
+ self.children.append(child)
+
+ def _set_children_from_tokens(self, tokens: Sequence[Token]) -> None:
+ """Convert the token stream to a tree structure and set the resulting
+ nodes as children of `self`."""
+ reversed_tokens = list(reversed(tokens))
+ while reversed_tokens:
+ token = reversed_tokens.pop()
+
+ if not token.nesting:
+ self._add_child([token])
+ continue
+ if token.nesting != 1:
+ raise ValueError("Invalid token nesting")
+
+ nested_tokens = [token]
+ nesting = 1
+ while reversed_tokens and nesting:
+ token = reversed_tokens.pop()
+ nested_tokens.append(token)
+ nesting += token.nesting
+ if nesting:
+ raise ValueError(f"unclosed tokens starting {nested_tokens[0]}")
+
+ self._add_child(nested_tokens)
+
+ def pretty(
+ self, *, indent: int = 2, show_text: bool = False, _current: int = 0
+ ) -> str:
+ """Create an XML style string of the tree."""
+ prefix = " " * _current
+ text = prefix + f"<{self.type}"
+ if not self.is_root and self.attrs:
+ text += " " + " ".join(f"{k}={v!r}" for k, v in self.attrs.items())
+ text += ">"
+ if (
+ show_text
+ and not self.is_root
+ and self.type in ("text", "text_special")
+ and self.content
+ ):
+ text += "\n" + textwrap.indent(self.content, prefix + " " * indent)
+ for child in self.children:
+ text += "\n" + child.pretty(
+ indent=indent, show_text=show_text, _current=_current + indent
+ )
+ return text
+
+ def walk(
+ self: _NodeType, *, include_self: bool = True
+ ) -> Generator[_NodeType, None, None]:
+ """Recursively yield all descendant nodes in the tree starting at self.
+
+ The order mimics the order of the underlying linear token
+ stream (i.e. depth first).
+ """
+ if include_self:
+ yield self
+ for child in self.children:
+ yield from child.walk(include_self=True)
+
+ # NOTE:
+ # The values of the properties defined below directly map to properties
+ # of the underlying `Token`s. A root node does not translate to a `Token`
+ # object, so calling these property getters on a root node will raise an
+ # `AttributeError`.
+ #
+ # There is no mapping for `Token.nesting` because the `is_nested` property
+ # provides that data, and can be called on any node type, including root.
+
+ def _attribute_token(self) -> Token:
+ """Return the `Token` that is used as the data source for the
+ properties defined below."""
+ if self.token:
+ return self.token
+ if self.nester_tokens:
+ return self.nester_tokens.opening
+ raise AttributeError("Root node does not have the accessed attribute")
+
+ @property
+ def tag(self) -> str:
+ """html tag name, e.g. \"p\" """
+ return self._attribute_token().tag
+
+ @property
+ def attrs(self) -> dict[str, str | int | float]:
+ """Html attributes."""
+ return self._attribute_token().attrs
+
+ def attrGet(self, name: str) -> None | str | int | float:
+ """Get the value of attribute `name`, or null if it does not exist."""
+ return self._attribute_token().attrGet(name)
+
+ @property
+ def map(self) -> tuple[int, int] | None:
+ """Source map info. Format: `tuple[ line_begin, line_end ]`"""
+ map_ = self._attribute_token().map
+ if map_:
+ # Type ignore because `Token`s attribute types are not perfect
+ return tuple(map_) # type: ignore
+ return None
+
+ @property
+ def level(self) -> int:
+ """nesting level, the same as `state.level`"""
+ return self._attribute_token().level
+
+ @property
+ def content(self) -> str:
+ """In the case of a self-closing tag (code, html, fence, etc.), it
+ has contents of this tag."""
+ return self._attribute_token().content
+
+ @property
+ def markup(self) -> str:
+ """'*' or '_' for emphasis, fence string for fence, etc."""
+ return self._attribute_token().markup
+
+ @property
+ def info(self) -> str:
+ """fence infostring"""
+ return self._attribute_token().info
+
+ @property
+ def meta(self) -> dict[Any, Any]:
+ """A place for plugins to store any arbitrary data."""
+ return self._attribute_token().meta
+
+ @property
+ def block(self) -> bool:
+ """True for block-level tokens, false for inline tokens."""
+ return self._attribute_token().block
+
+ @property
+ def hidden(self) -> bool:
+ """If it's true, ignore this element when rendering.
+ Used for tight lists to hide paragraphs."""
+ return self._attribute_token().hidden
diff --git a/lib/markdown_it/utils.py b/lib/markdown_it/utils.py
new file mode 100644
index 0000000..2571a15
--- /dev/null
+++ b/lib/markdown_it/utils.py
@@ -0,0 +1,186 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Iterable, MutableMapping
+from collections.abc import MutableMapping as MutableMappingABC
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, TypedDict, cast
+
+if TYPE_CHECKING:
+ from typing_extensions import NotRequired
+
+
+EnvType = MutableMapping[str, Any] # note: could use TypeAlias in python 3.10
+"""Type for the environment sandbox used in parsing and rendering,
+which stores mutable variables for use by plugins and rules.
+"""
+
+
+class OptionsType(TypedDict):
+ """Options for parsing."""
+
+ maxNesting: int
+ """Internal protection, recursion limit."""
+ html: bool
+ """Enable HTML tags in source."""
+ linkify: bool
+ """Enable autoconversion of URL-like texts to links."""
+ typographer: bool
+ """Enable smartquotes and replacements."""
+ quotes: str
+ """Quote characters."""
+ xhtmlOut: bool
+ """Use '/' to close single tags (<br />)."""
+ breaks: bool
+ """Convert newlines in paragraphs into <br>."""
+ langPrefix: str
+ """CSS language prefix for fenced blocks."""
+ highlight: Callable[[str, str, str], str] | None
+ """Highlighter function: (content, lang, attrs) -> str."""
+ store_labels: NotRequired[bool]
+ """Store link label in link/image token's metadata (under Token.meta['label']).
+
+ This is a Python only option, and is intended for the use of round-trip parsing.
+ """
+
+
+class PresetType(TypedDict):
+ """Preset configuration for markdown-it."""
+
+ options: OptionsType
+ """Options for parsing."""
+ components: MutableMapping[str, MutableMapping[str, list[str]]]
+ """Components for parsing and rendering."""
+
+
+class OptionsDict(MutableMappingABC): # type: ignore
+ """A dictionary, with attribute access to core markdown-it configuration options."""
+
+ # Note: ideally we would probably just remove attribute access entirely,
+ # but we keep it for backwards compatibility.
+
+ def __init__(self, options: OptionsType) -> None:
+ self._options = cast(OptionsType, dict(options))
+
+ def __getitem__(self, key: str) -> Any:
+ return self._options[key] # type: ignore[literal-required]
+
+ def __setitem__(self, key: str, value: Any) -> None:
+ self._options[key] = value # type: ignore[literal-required]
+
+ def __delitem__(self, key: str) -> None:
+ del self._options[key] # type: ignore
+
+ def __iter__(self) -> Iterable[str]: # type: ignore
+ return iter(self._options)
+
+ def __len__(self) -> int:
+ return len(self._options)
+
+ def __repr__(self) -> str:
+ return repr(self._options)
+
+ def __str__(self) -> str:
+ return str(self._options)
+
+ @property
+ def maxNesting(self) -> int:
+ """Internal protection, recursion limit."""
+ return self._options["maxNesting"]
+
+ @maxNesting.setter
+ def maxNesting(self, value: int) -> None:
+ self._options["maxNesting"] = value
+
+ @property
+ def html(self) -> bool:
+ """Enable HTML tags in source."""
+ return self._options["html"]
+
+ @html.setter
+ def html(self, value: bool) -> None:
+ self._options["html"] = value
+
+ @property
+ def linkify(self) -> bool:
+ """Enable autoconversion of URL-like texts to links."""
+ return self._options["linkify"]
+
+ @linkify.setter
+ def linkify(self, value: bool) -> None:
+ self._options["linkify"] = value
+
+ @property
+ def typographer(self) -> bool:
+ """Enable smartquotes and replacements."""
+ return self._options["typographer"]
+
+ @typographer.setter
+ def typographer(self, value: bool) -> None:
+ self._options["typographer"] = value
+
+ @property
+ def quotes(self) -> str:
+ """Quote characters."""
+ return self._options["quotes"]
+
+ @quotes.setter
+ def quotes(self, value: str) -> None:
+ self._options["quotes"] = value
+
+ @property
+ def xhtmlOut(self) -> bool:
+ """Use '/' to close single tags (<br />)."""
+ return self._options["xhtmlOut"]
+
+ @xhtmlOut.setter
+ def xhtmlOut(self, value: bool) -> None:
+ self._options["xhtmlOut"] = value
+
+ @property
+ def breaks(self) -> bool:
+ """Convert newlines in paragraphs into <br>."""
+ return self._options["breaks"]
+
+ @breaks.setter
+ def breaks(self, value: bool) -> None:
+ self._options["breaks"] = value
+
+ @property
+ def langPrefix(self) -> str:
+ """CSS language prefix for fenced blocks."""
+ return self._options["langPrefix"]
+
+ @langPrefix.setter
+ def langPrefix(self, value: str) -> None:
+ self._options["langPrefix"] = value
+
+ @property
+ def highlight(self) -> Callable[[str, str, str], str] | None:
+ """Highlighter function: (content, langName, langAttrs) -> escaped HTML."""
+ return self._options["highlight"]
+
+ @highlight.setter
+ def highlight(self, value: Callable[[str, str, str], str] | None) -> None:
+ self._options["highlight"] = value
+
+
+def read_fixture_file(path: str | Path) -> list[list[Any]]:
+ text = Path(path).read_text(encoding="utf-8")
+ tests = []
+ section = 0
+ last_pos = 0
+ lines = text.splitlines(keepends=True)
+ for i in range(len(lines)):
+ if lines[i].rstrip() == ".":
+ if section == 0:
+ tests.append([i, lines[i - 1].strip()])
+ section = 1
+ elif section == 1:
+ tests[-1].append("".join(lines[last_pos + 1 : i]))
+ section = 2
+ elif section == 2:
+ tests[-1].append("".join(lines[last_pos + 1 : i]))
+ section = 0
+
+ last_pos = i
+ return tests
diff --git a/lib/markdown_it_py-4.0.0.dist-info/INSTALLER b/lib/markdown_it_py-4.0.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/lib/markdown_it_py-4.0.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/markdown_it_py-4.0.0.dist-info/METADATA b/lib/markdown_it_py-4.0.0.dist-info/METADATA
new file mode 100644
index 0000000..0f2b466
--- /dev/null
+++ b/lib/markdown_it_py-4.0.0.dist-info/METADATA
@@ -0,0 +1,219 @@
+Metadata-Version: 2.4
+Name: markdown-it-py
+Version: 4.0.0
+Summary: Python port of markdown-it. Markdown parsing, done right!
+Keywords: markdown,lexer,parser,commonmark,markdown-it
+Author-email: Chris Sewell
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+License-File: LICENSE
+License-File: LICENSE.markdown-it
+Requires-Dist: mdurl~=0.1
+Requires-Dist: psutil ; extra == "benchmarking"
+Requires-Dist: pytest ; extra == "benchmarking"
+Requires-Dist: pytest-benchmark ; extra == "benchmarking"
+Requires-Dist: commonmark~=0.9 ; extra == "compare"
+Requires-Dist: markdown~=3.4 ; extra == "compare"
+Requires-Dist: mistletoe~=1.0 ; extra == "compare"
+Requires-Dist: mistune~=3.0 ; extra == "compare"
+Requires-Dist: panflute~=2.3 ; extra == "compare"
+Requires-Dist: markdown-it-pyrs ; extra == "compare"
+Requires-Dist: linkify-it-py>=1,<3 ; extra == "linkify"
+Requires-Dist: mdit-py-plugins>=0.5.0 ; extra == "plugins"
+Requires-Dist: gprof2dot ; extra == "profiling"
+Requires-Dist: mdit-py-plugins>=0.5.0 ; extra == "rtd"
+Requires-Dist: myst-parser ; extra == "rtd"
+Requires-Dist: pyyaml ; extra == "rtd"
+Requires-Dist: sphinx ; extra == "rtd"
+Requires-Dist: sphinx-copybutton ; extra == "rtd"
+Requires-Dist: sphinx-design ; extra == "rtd"
+Requires-Dist: sphinx-book-theme~=1.0 ; extra == "rtd"
+Requires-Dist: jupyter_sphinx ; extra == "rtd"
+Requires-Dist: ipykernel ; extra == "rtd"
+Requires-Dist: coverage ; extra == "testing"
+Requires-Dist: pytest ; extra == "testing"
+Requires-Dist: pytest-cov ; extra == "testing"
+Requires-Dist: pytest-regressions ; extra == "testing"
+Requires-Dist: requests ; extra == "testing"
+Project-URL: Documentation, https://markdown-it-py.readthedocs.io
+Project-URL: Homepage, https://github.com/executablebooks/markdown-it-py
+Provides-Extra: benchmarking
+Provides-Extra: compare
+Provides-Extra: linkify
+Provides-Extra: plugins
+Provides-Extra: profiling
+Provides-Extra: rtd
+Provides-Extra: testing
+
+# markdown-it-py
+
+[![Github-CI][github-ci]][github-link]
+[![Coverage Status][codecov-badge]][codecov-link]
+[![PyPI][pypi-badge]][pypi-link]
+[![Conda][conda-badge]][conda-link]
+[![PyPI - Downloads][install-badge]][install-link]
+
+
+
+
+
+> Markdown parser done right.
+
+- Follows the __[CommonMark spec](http://spec.commonmark.org/)__ for baseline parsing
+- Configurable syntax: you can add new rules and even replace existing ones.
+- Pluggable: Adds syntax extensions to extend the parser (see the [plugin list][md-plugins]).
+- High speed (see our [benchmarking tests][md-performance])
+- Easy to configure for [security][md-security]
+- Member of [Google's Assured Open Source Software](https://cloud.google.com/assured-open-source-software/docs/supported-packages)
+
+This is a Python port of [markdown-it], and some of its associated plugins.
+For more details see: <https://markdown-it-py.readthedocs.io>.
+
+For details on [markdown-it] itself, see:
+
+- The __[Live demo](https://markdown-it.github.io)__
+- [The markdown-it README][markdown-it-readme]
+
+**See also:** [markdown-it-pyrs](https://github.com/chrisjsewell/markdown-it-pyrs) for an experimental Rust binding,
+for even more speed!
+
+## Installation
+
+### PIP
+
+```bash
+pip install markdown-it-py[plugins]
+```
+
+or with extras
+
+```bash
+pip install markdown-it-py[linkify,plugins]
+```
+
+### Conda
+
+```bash
+conda install -c conda-forge markdown-it-py
+```
+
+or with extras
+
+```bash
+conda install -c conda-forge markdown-it-py linkify-it-py mdit-py-plugins
+```
+
+## Usage
+
+### Python API Usage
+
+Render markdown to HTML with markdown-it-py and a custom configuration
+with and without plugins and features:
+
+```python
+from markdown_it import MarkdownIt
+from mdit_py_plugins.front_matter import front_matter_plugin
+from mdit_py_plugins.footnote import footnote_plugin
+
+md = (
+ MarkdownIt('commonmark', {'breaks':True,'html':True})
+ .use(front_matter_plugin)
+ .use(footnote_plugin)
+ .enable('table')
+)
+text = ("""
+---
+a: 1
+---
+
+a | b
+- | -
+1 | 2
+
+A footnote [^1]
+
+[^1]: some details
+""")
+tokens = md.parse(text)
+html_text = md.render(text)
+
+## To export the html to a file, uncomment the lines below:
+# from pathlib import Path
+# Path("output.html").write_text(html_text)
+```
+
+### Command-line Usage
+
+Render markdown to HTML with markdown-it-py from the
+command-line:
+
+```console
+usage: markdown-it [-h] [-v] [filenames [filenames ...]]
+
+Parse one or more markdown files, convert each to HTML, and print to stdout
+
+positional arguments:
+ filenames specify an optional list of files to convert
+
+optional arguments:
+ -h, --help show this help message and exit
+ -v, --version show program's version number and exit
+
+Interactive:
+
+ $ markdown-it
+ markdown-it-py [version 0.0.0] (interactive)
+ Type Ctrl-D to complete input, or Ctrl-C to exit.
+ >>> # Example
+ ... > markdown *input*
+ ...
+ <h1>Example</h1>
+ <blockquote>
+ <p>markdown <em>input</em></p>
+ </blockquote>
+
+Batch:
+
+ $ markdown-it README.md README.footer.md > index.html
+
+```
+
+## References / Thanks
+
+Big thanks to the authors of [markdown-it]:
+
+- Alex Kocharin [github/rlidwka](https://github.com/rlidwka)
+- Vitaly Puzrin [github/puzrin](https://github.com/puzrin)
+
+Also [John MacFarlane](https://github.com/jgm) for his work on the CommonMark spec and reference implementations.
+
+[github-ci]: https://github.com/executablebooks/markdown-it-py/actions/workflows/tests.yml/badge.svg?branch=master
+[github-link]: https://github.com/executablebooks/markdown-it-py
+[pypi-badge]: https://img.shields.io/pypi/v/markdown-it-py.svg
+[pypi-link]: https://pypi.org/project/markdown-it-py
+[conda-badge]: https://anaconda.org/conda-forge/markdown-it-py/badges/version.svg
+[conda-link]: https://anaconda.org/conda-forge/markdown-it-py
+[codecov-badge]: https://codecov.io/gh/executablebooks/markdown-it-py/branch/master/graph/badge.svg
+[codecov-link]: https://codecov.io/gh/executablebooks/markdown-it-py
+[install-badge]: https://img.shields.io/pypi/dw/markdown-it-py?label=pypi%20installs
+[install-link]: https://pypistats.org/packages/markdown-it-py
+
+[CommonMark spec]: http://spec.commonmark.org/
+[markdown-it]: https://github.com/markdown-it/markdown-it
+[markdown-it-readme]: https://github.com/markdown-it/markdown-it/blob/master/README.md
+[md-security]: https://markdown-it-py.readthedocs.io/en/latest/security.html
+[md-performance]: https://markdown-it-py.readthedocs.io/en/latest/performance.html
+[md-plugins]: https://markdown-it-py.readthedocs.io/en/latest/plugins.html
+
diff --git a/lib/markdown_it_py-4.0.0.dist-info/RECORD b/lib/markdown_it_py-4.0.0.dist-info/RECORD
new file mode 100644
index 0000000..55a1948
--- /dev/null
+++ b/lib/markdown_it_py-4.0.0.dist-info/RECORD
@@ -0,0 +1,142 @@
+../../bin/markdown-it,sha256=W3AzqoMVc-K93MTDhze1VTLE-VkAK11bh8a7c3bE6EI,192
+markdown_it/__init__.py,sha256=R7fMvDxageYJ4Q6doBcimogy1ctcV1eBuCFu5Pr8bbA,114
+markdown_it/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/__pycache__/_compat.cpython-314.pyc,,
+markdown_it/__pycache__/_punycode.cpython-314.pyc,,
+markdown_it/__pycache__/main.cpython-314.pyc,,
+markdown_it/__pycache__/parser_block.cpython-314.pyc,,
+markdown_it/__pycache__/parser_core.cpython-314.pyc,,
+markdown_it/__pycache__/parser_inline.cpython-314.pyc,,
+markdown_it/__pycache__/renderer.cpython-314.pyc,,
+markdown_it/__pycache__/ruler.cpython-314.pyc,,
+markdown_it/__pycache__/token.cpython-314.pyc,,
+markdown_it/__pycache__/tree.cpython-314.pyc,,
+markdown_it/__pycache__/utils.cpython-314.pyc,,
+markdown_it/_compat.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
+markdown_it/_punycode.py,sha256=JvSOZJ4VKr58z7unFGM0KhfTxqHMk2w8gglxae2QszM,2373
+markdown_it/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+markdown_it/cli/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/cli/__pycache__/parse.cpython-314.pyc,,
+markdown_it/cli/parse.py,sha256=Un3N7fyGHhZAQouGVnRx-WZcpKwEK2OF08rzVAEBie8,2881
+markdown_it/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+markdown_it/common/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/common/__pycache__/entities.cpython-314.pyc,,
+markdown_it/common/__pycache__/html_blocks.cpython-314.pyc,,
+markdown_it/common/__pycache__/html_re.cpython-314.pyc,,
+markdown_it/common/__pycache__/normalize_url.cpython-314.pyc,,
+markdown_it/common/__pycache__/utils.cpython-314.pyc,,
+markdown_it/common/entities.py,sha256=EYRCmUL7ZU1FRGLSXQlPx356lY8EUBdFyx96eSGc6d0,157
+markdown_it/common/html_blocks.py,sha256=QXbUDMoN9lXLgYFk2DBYllnLiFukL6dHn2X98Y6Wews,986
+markdown_it/common/html_re.py,sha256=FggAEv9IL8gHQqsGTkHcf333rTojwG0DQJMH9oVu0fU,926
+markdown_it/common/normalize_url.py,sha256=avOXnLd9xw5jU1q5PLftjAM9pvGx8l9QDEkmZSyrMgg,2568
+markdown_it/common/utils.py,sha256=pMgvMOE3ZW-BdJ7HfuzlXNKyD1Ivk7jHErc2J_B8J5M,8734
+markdown_it/helpers/__init__.py,sha256=YH2z7dS0WUc_9l51MWPvrLtFoBPh4JLGw58OuhGRCK0,253
+markdown_it/helpers/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/helpers/__pycache__/parse_link_destination.cpython-314.pyc,,
+markdown_it/helpers/__pycache__/parse_link_label.cpython-314.pyc,,
+markdown_it/helpers/__pycache__/parse_link_title.cpython-314.pyc,,
+markdown_it/helpers/parse_link_destination.py,sha256=u-xxWVP3g1s7C1bQuQItiYyDrYoYHJzXaZXPgr-o6mY,1906
+markdown_it/helpers/parse_link_label.py,sha256=PIHG6ZMm3BUw0a2m17lCGqNrl3vaz911tuoGviWD3I4,1037
+markdown_it/helpers/parse_link_title.py,sha256=jkLoYQMKNeX9bvWQHkaSroiEo27HylkEUNmj8xBRlp4,2273
+markdown_it/main.py,sha256=vzuT23LJyKrPKNyHKKAbOHkNWpwIldOGUM-IGsv2DHM,12732
+markdown_it/parser_block.py,sha256=-MyugXB63Te71s4NcSQZiK5bE6BHkdFyZv_bviuatdI,3939
+markdown_it/parser_core.py,sha256=SRmJjqe8dC6GWzEARpWba59cBmxjCr3Gsg8h29O8sQk,1016
+markdown_it/parser_inline.py,sha256=y0jCig8CJxQO7hBz0ZY3sGvPlAKTohOwIgaqnlSaS5A,5024
+markdown_it/port.yaml,sha256=jt_rdwOnfocOV5nc35revTybAAQMIp_-1fla_527sVE,2447
+markdown_it/presets/__init__.py,sha256=22vFtwJEY7iqFRtgVZ-pJthcetfpr1Oig8XOF9x1328,970
+markdown_it/presets/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/presets/__pycache__/commonmark.cpython-314.pyc,,
+markdown_it/presets/__pycache__/default.cpython-314.pyc,,
+markdown_it/presets/__pycache__/zero.cpython-314.pyc,,
+markdown_it/presets/commonmark.py,sha256=ygfb0R7WQ_ZoyQP3df-B0EnYMqNXCVOSw9SAdMjsGow,2869
+markdown_it/presets/default.py,sha256=FfKVUI0HH3M-_qy6RwotLStdC4PAaAxE7Dq0_KQtRtc,1811
+markdown_it/presets/zero.py,sha256=okXWTBEI-2nmwx5XKeCjxInRf65oC11gahtRl-QNtHM,2113
+markdown_it/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
+markdown_it/renderer.py,sha256=Lzr0glqd5oxFL10DOfjjW8kg4Gp41idQ4viEQaE47oA,9947
+markdown_it/ruler.py,sha256=eMAtWGRAfSM33aiJed0k5923BEkuMVsMq1ct8vU-ql4,9142
+markdown_it/rules_block/__init__.py,sha256=SQpg0ocmsHeILPAWRHhzgLgJMKIcNkQyELH13o_6Ktc,553
+markdown_it/rules_block/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/blockquote.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/code.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/fence.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/heading.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/hr.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/html_block.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/lheading.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/list.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/paragraph.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/reference.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/state_block.cpython-314.pyc,,
+markdown_it/rules_block/__pycache__/table.cpython-314.pyc,,
+markdown_it/rules_block/blockquote.py,sha256=7uymS36dcrned3DsIaRcqcbFU1NlymhvsZpEXTD3_n8,8887
+markdown_it/rules_block/code.py,sha256=iTAxv0U1-MDhz88M1m1pi2vzOhEMSEROsXMo2Qq--kU,860
+markdown_it/rules_block/fence.py,sha256=BJgU-PqZ4vAlCqGcrc8UtdLpJJyMeRWN-G-Op-zxrMc,2537
+markdown_it/rules_block/heading.py,sha256=4Lh15rwoVsQjE1hVhpbhidQ0k9xKHihgjAeYSbwgO5k,1745
+markdown_it/rules_block/hr.py,sha256=QCoY5kImaQRvF7PyP8OoWft6A8JVH1v6MN-0HR9Ikpg,1227
+markdown_it/rules_block/html_block.py,sha256=wA8pb34LtZr1BkIATgGKQBIGX5jQNOkwZl9UGEqvb5M,2721
+markdown_it/rules_block/lheading.py,sha256=fWoEuUo7S2svr5UMKmyQMkh0hheYAHg2gMM266Mogs4,2625
+markdown_it/rules_block/list.py,sha256=gIodkAJFyOIyKCZCj5lAlL7jIj5kAzrDb-K-2MFNplY,9668
+markdown_it/rules_block/paragraph.py,sha256=9pmCwA7eMu4LBdV4fWKzC4EdwaOoaGw2kfeYSQiLye8,1819
+markdown_it/rules_block/reference.py,sha256=ue1qZbUaUP0GIvwTjh6nD1UtCij8uwsIMuYW1xBkckc,6983
+markdown_it/rules_block/state_block.py,sha256=HowsQyy5hGUibH4HRZWKfLIlXeDUnuWL7kpF0-rSwoM,8422
+markdown_it/rules_block/table.py,sha256=8nMd9ONGOffER7BXmc9kbbhxkLjtpX79dVLR0iatGnM,7682
+markdown_it/rules_core/__init__.py,sha256=QFGBe9TUjnRQJDU7xY4SQYpxyTHNwg8beTSwXpNGRjE,394
+markdown_it/rules_core/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/block.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/inline.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/linkify.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/normalize.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/replacements.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/smartquotes.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/state_core.cpython-314.pyc,,
+markdown_it/rules_core/__pycache__/text_join.cpython-314.pyc,,
+markdown_it/rules_core/block.py,sha256=0_JY1CUy-H2OooFtIEZAACtuoGUMohgxo4Z6A_UinSg,372
+markdown_it/rules_core/inline.py,sha256=9oWmeBhJHE7x47oJcN9yp6UsAZtrEY_A-VmfoMvKld4,325
+markdown_it/rules_core/linkify.py,sha256=mjQqpk_lHLh2Nxw4UFaLxa47Fgi-OHnmDamlgXnhmv0,5141
+markdown_it/rules_core/normalize.py,sha256=AJm4femtFJ_QBnM0dzh0UNqTTJk9K6KMtwRPaioZFqM,403
+markdown_it/rules_core/replacements.py,sha256=CH75mie-tdzdLKQtMBuCTcXAl1ijegdZGfbV_Vk7st0,3471
+markdown_it/rules_core/smartquotes.py,sha256=izK9fSyuTzA-zAUGkRkz9KwwCQWo40iRqcCKqOhFbEE,7443
+markdown_it/rules_core/state_core.py,sha256=HqWZCUr5fW7xG6jeQZDdO0hE9hxxyl3_-bawgOy57HY,570
+markdown_it/rules_core/text_join.py,sha256=rLXxNuLh_es5RvH31GsXi7en8bMNO9UJ5nbJMDBPltY,1173
+markdown_it/rules_inline/__init__.py,sha256=qqHZk6-YE8Rc12q6PxvVKBaxv2wmZeeo45H1XMR_Vxs,696
+markdown_it/rules_inline/__pycache__/__init__.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/autolink.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/backticks.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/balance_pairs.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/emphasis.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/entity.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/escape.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/fragments_join.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/html_inline.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/image.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/link.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/linkify.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/newline.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/state_inline.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/strikethrough.cpython-314.pyc,,
+markdown_it/rules_inline/__pycache__/text.cpython-314.pyc,,
+markdown_it/rules_inline/autolink.py,sha256=pPoqJY8i99VtFn7KgUzMackMeq1hytzioVvWs-VQPRo,2065
+markdown_it/rules_inline/backticks.py,sha256=J7bezjjNxiXlKqvHc0fJkHZwH7-2nBsXVjcKydk8E4M,2037
+markdown_it/rules_inline/balance_pairs.py,sha256=5zgBiGidqdiWmt7Io_cuZOYh5EFEfXrYRce8RXg5m7o,4852
+markdown_it/rules_inline/emphasis.py,sha256=7aDLZx0Jlekuvbu3uEUTDhJp00Z0Pj6g4C3-VLhI8Co,3123
+markdown_it/rules_inline/entity.py,sha256=CE8AIGMi5isEa24RNseo0wRmTTaj5YLbgTFdDmBesAU,1651
+markdown_it/rules_inline/escape.py,sha256=KGulwrP5FnqZM7GXY8lf7pyVv0YkR59taZDeHb5cmKg,1659
+markdown_it/rules_inline/fragments_join.py,sha256=_3JbwWYJz74gRHeZk6T8edVJT2IVSsi7FfmJJlieQlA,1493
+markdown_it/rules_inline/html_inline.py,sha256=SBg6HR0HRqCdrkkec0dfOYuQdAqyfeLRFLeQggtgjvg,1130
+markdown_it/rules_inline/image.py,sha256=Wbsg7jgnOtKXIwXGNJOlG7ORThkMkBVolxItC0ph6C0,4141
+markdown_it/rules_inline/link.py,sha256=2oD-fAdB0xyxDRtZLTjzLeWbzJ1k9bbPVQmohb58RuI,4258
+markdown_it/rules_inline/linkify.py,sha256=ifH6sb5wE8PGMWEw9Sr4x0DhMVfNOEBCfFSwKll2O-s,1706
+markdown_it/rules_inline/newline.py,sha256=329r0V3aDjzNtJcvzA3lsFYjzgBrShLAV5uf9hwQL_M,1297
+markdown_it/rules_inline/state_inline.py,sha256=d-menFzbz5FDy1JNgGBF-BASasnVI-9RuOxWz9PnKn4,5003
+markdown_it/rules_inline/strikethrough.py,sha256=pwcPlyhkh5pqFVxRCSrdW5dNCIOtU4eDit7TVDTPIVA,3214
+markdown_it/rules_inline/text.py,sha256=FQqaQRUqbnMLO9ZSWPWQUMEKH6JqWSSSmlZ5Ii9P48o,1119
+markdown_it/token.py,sha256=cWrt9kodfPdizHq_tYrzyIZNtJYNMN1813DPNlunwTg,6381
+markdown_it/tree.py,sha256=56Cdbwu2Aiks7kNYqO_fQZWpPb_n48CUllzjQQfgu1Y,11111
+markdown_it/utils.py,sha256=lVLeX7Af3GaNFfxmMgUbsn5p7cXbwhLq7RSf56UWuRE,5687
+markdown_it_py-4.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+markdown_it_py-4.0.0.dist-info/METADATA,sha256=6fyqHi2vP5bYQKCfuqo5T-qt83o22Ip7a2tnJIfGW_s,7288
+markdown_it_py-4.0.0.dist-info/RECORD,,
+markdown_it_py-4.0.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+markdown_it_py-4.0.0.dist-info/entry_points.txt,sha256=T81l7fHQ3pllpQ4wUtQK6a8g_p6wxQbnjKVHCk2WMG4,58
+markdown_it_py-4.0.0.dist-info/licenses/LICENSE,sha256=SiJg1uLND1oVGh6G2_59PtVSseK-q_mUHBulxJy85IQ,1078
+markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it,sha256=eSxIxahJoV_fnjfovPnm0d0TsytGxkKnSKCkapkZ1HM,1073
diff --git a/lib/markdown_it_py-4.0.0.dist-info/WHEEL b/lib/markdown_it_py-4.0.0.dist-info/WHEEL
new file mode 100644
index 0000000..d8b9936
--- /dev/null
+++ b/lib/markdown_it_py-4.0.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.12.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/lib/markdown_it_py-4.0.0.dist-info/entry_points.txt b/lib/markdown_it_py-4.0.0.dist-info/entry_points.txt
new file mode 100644
index 0000000..7d829cd
--- /dev/null
+++ b/lib/markdown_it_py-4.0.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+markdown-it=markdown_it.cli.parse:main
+
diff --git a/lib/markdown_it_py-4.0.0.dist-info/licenses/LICENSE b/lib/markdown_it_py-4.0.0.dist-info/licenses/LICENSE
new file mode 100644
index 0000000..582ddf5
--- /dev/null
+++ b/lib/markdown_it_py-4.0.0.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 ExecutableBookProject
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it b/lib/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it
new file mode 100644
index 0000000..7ffa058
--- /dev/null
+++ b/lib/markdown_it_py-4.0.0.dist-info/licenses/LICENSE.markdown-it
@@ -0,0 +1,22 @@
+Copyright (c) 2014 Vitaly Puzrin, Alex Kocharin.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/mdurl-0.1.2.dist-info/INSTALLER b/lib/mdurl-0.1.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/lib/mdurl-0.1.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/mdurl-0.1.2.dist-info/LICENSE b/lib/mdurl-0.1.2.dist-info/LICENSE
new file mode 100644
index 0000000..2a920c5
--- /dev/null
+++ b/lib/mdurl-0.1.2.dist-info/LICENSE
@@ -0,0 +1,46 @@
+Copyright (c) 2015 Vitaly Puzrin, Alex Kocharin.
+Copyright (c) 2021 Taneli Hukkinen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+.parse() is based on Joyent's node.js `url` code:
+
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/lib/mdurl-0.1.2.dist-info/METADATA b/lib/mdurl-0.1.2.dist-info/METADATA
new file mode 100644
index 0000000..b4670e8
--- /dev/null
+++ b/lib/mdurl-0.1.2.dist-info/METADATA
@@ -0,0 +1,32 @@
+Metadata-Version: 2.1
+Name: mdurl
+Version: 0.1.2
+Summary: Markdown URL utilities
+Keywords: markdown,commonmark
+Author-email: Taneli Hukkinen
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Typing :: Typed
+Project-URL: Homepage, https://github.com/executablebooks/mdurl
+
+# mdurl
+
+[](https://github.com/executablebooks/mdurl/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush)
+[](https://codecov.io/gh/executablebooks/mdurl)
+[](https://pypi.org/project/mdurl)
+
+This is a Python port of the JavaScript [mdurl](https://www.npmjs.com/package/mdurl) package.
+See the [upstream README.md file](https://github.com/markdown-it/mdurl/blob/master/README.md) for API documentation.
+
diff --git a/lib/mdurl-0.1.2.dist-info/RECORD b/lib/mdurl-0.1.2.dist-info/RECORD
new file mode 100644
index 0000000..355bb2f
--- /dev/null
+++ b/lib/mdurl-0.1.2.dist-info/RECORD
@@ -0,0 +1,18 @@
+mdurl-0.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+mdurl-0.1.2.dist-info/LICENSE,sha256=fGBd9uKGZ6lgMRjpgnT2SknOPu0NJvzM6VNKNF4O-VU,2338
+mdurl-0.1.2.dist-info/METADATA,sha256=tTsp1I9Jk2cFP9o8gefOJ9JVg4Drv4PmYCOwLrfd0l0,1638
+mdurl-0.1.2.dist-info/RECORD,,
+mdurl-0.1.2.dist-info/WHEEL,sha256=4TfKIB_xu-04bc2iKz6_zFt-gEFEEDU_31HGhqzOCE8,81
+mdurl/__init__.py,sha256=1vpE89NyXniIRZNC_4f6BPm3Ub4bPntjfyyhLRR7opU,547
+mdurl/__pycache__/__init__.cpython-314.pyc,,
+mdurl/__pycache__/_decode.cpython-314.pyc,,
+mdurl/__pycache__/_encode.cpython-314.pyc,,
+mdurl/__pycache__/_format.cpython-314.pyc,,
+mdurl/__pycache__/_parse.cpython-314.pyc,,
+mdurl/__pycache__/_url.cpython-314.pyc,,
+mdurl/_decode.py,sha256=3Q_gDQqU__TvDbu7x-b9LjbVl4QWy5g_qFwljcuvN_Y,3004
+mdurl/_encode.py,sha256=goJLUFt1h4rVZNqqm9t15Nw2W-bFXYQEy3aR01ImWvs,2602
+mdurl/_format.py,sha256=xZct0mdePXA0H3kAqxjGtlB5O86G35DAYMGkA44CmB4,626
+mdurl/_parse.py,sha256=ezZSkM2_4NQ2Zx047sEdcJG7NYQRFHiZK7Y8INHFzwY,11374
+mdurl/_url.py,sha256=5kQnRQN2A_G4svLnRzZcG0bfoD9AbBrYDXousDHZ3z0,284
+mdurl/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26
diff --git a/lib/mdurl-0.1.2.dist-info/WHEEL b/lib/mdurl-0.1.2.dist-info/WHEEL
new file mode 100644
index 0000000..668ba4d
--- /dev/null
+++ b/lib/mdurl-0.1.2.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.7.1
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/lib/mdurl/__init__.py b/lib/mdurl/__init__.py
new file mode 100644
index 0000000..cdbb640
--- /dev/null
+++ b/lib/mdurl/__init__.py
@@ -0,0 +1,18 @@
+__all__ = (
+ "decode",
+ "DECODE_DEFAULT_CHARS",
+ "DECODE_COMPONENT_CHARS",
+ "encode",
+ "ENCODE_DEFAULT_CHARS",
+ "ENCODE_COMPONENT_CHARS",
+ "format",
+ "parse",
+ "URL",
+)
+__version__ = "0.1.2" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
+
+from mdurl._decode import DECODE_COMPONENT_CHARS, DECODE_DEFAULT_CHARS, decode
+from mdurl._encode import ENCODE_COMPONENT_CHARS, ENCODE_DEFAULT_CHARS, encode
+from mdurl._format import format
+from mdurl._parse import url_parse as parse
+from mdurl._url import URL
diff --git a/lib/mdurl/__pycache__/__init__.cpython-314.pyc b/lib/mdurl/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..e946a91
Binary files /dev/null and b/lib/mdurl/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/mdurl/__pycache__/_decode.cpython-314.pyc b/lib/mdurl/__pycache__/_decode.cpython-314.pyc
new file mode 100644
index 0000000..ccb5d52
Binary files /dev/null and b/lib/mdurl/__pycache__/_decode.cpython-314.pyc differ
diff --git a/lib/mdurl/__pycache__/_encode.cpython-314.pyc b/lib/mdurl/__pycache__/_encode.cpython-314.pyc
new file mode 100644
index 0000000..491fae2
Binary files /dev/null and b/lib/mdurl/__pycache__/_encode.cpython-314.pyc differ
diff --git a/lib/mdurl/__pycache__/_format.cpython-314.pyc b/lib/mdurl/__pycache__/_format.cpython-314.pyc
new file mode 100644
index 0000000..8867605
Binary files /dev/null and b/lib/mdurl/__pycache__/_format.cpython-314.pyc differ
diff --git a/lib/mdurl/__pycache__/_parse.cpython-314.pyc b/lib/mdurl/__pycache__/_parse.cpython-314.pyc
new file mode 100644
index 0000000..6cc96fe
Binary files /dev/null and b/lib/mdurl/__pycache__/_parse.cpython-314.pyc differ
diff --git a/lib/mdurl/__pycache__/_url.cpython-314.pyc b/lib/mdurl/__pycache__/_url.cpython-314.pyc
new file mode 100644
index 0000000..7d01f4e
Binary files /dev/null and b/lib/mdurl/__pycache__/_url.cpython-314.pyc differ
diff --git a/lib/mdurl/_decode.py b/lib/mdurl/_decode.py
new file mode 100644
index 0000000..9b50a2d
--- /dev/null
+++ b/lib/mdurl/_decode.py
@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+from collections.abc import Sequence
+import functools
+import re
+
+DECODE_DEFAULT_CHARS = ";/?:@&=+$,#"
+DECODE_COMPONENT_CHARS = ""
+
+decode_cache: dict[str, list[str]] = {}
+
+
+def get_decode_cache(exclude: str) -> Sequence[str]:
+ if exclude in decode_cache:
+ return decode_cache[exclude]
+
+ cache: list[str] = []
+ decode_cache[exclude] = cache
+
+ for i in range(128):
+ ch = chr(i)
+ cache.append(ch)
+
+ for i in range(len(exclude)):
+ ch_code = ord(exclude[i])
+ cache[ch_code] = "%" + ("0" + hex(ch_code)[2:].upper())[-2:]
+
+ return cache
+
+
+# Decode percent-encoded string.
+#
+def decode(string: str, exclude: str = DECODE_DEFAULT_CHARS) -> str:
+ cache = get_decode_cache(exclude)
+ repl_func = functools.partial(repl_func_with_cache, cache=cache)
+ return re.sub(r"(%[a-f0-9]{2})+", repl_func, string, flags=re.IGNORECASE)
+
+
+def repl_func_with_cache(match: re.Match, cache: Sequence[str]) -> str:
+ seq = match.group()
+ result = ""
+
+ i = 0
+ l = len(seq) # noqa: E741
+ while i < l:
+ b1 = int(seq[i + 1 : i + 3], 16)
+
+ if b1 < 0x80:
+ result += cache[b1]
+ i += 3 # emulate JS for loop statement3
+ continue
+
+ if (b1 & 0xE0) == 0xC0 and (i + 3 < l):
+ # 110xxxxx 10xxxxxx
+ b2 = int(seq[i + 4 : i + 6], 16)
+
+ if (b2 & 0xC0) == 0x80:
+ all_bytes = bytes((b1, b2))
+ try:
+ result += all_bytes.decode()
+ except UnicodeDecodeError:
+ result += "\ufffd" * 2
+
+ i += 3
+ i += 3 # emulate JS for loop statement3
+ continue
+
+ if (b1 & 0xF0) == 0xE0 and (i + 6 < l):
+ # 1110xxxx 10xxxxxx 10xxxxxx
+ b2 = int(seq[i + 4 : i + 6], 16)
+ b3 = int(seq[i + 7 : i + 9], 16)
+
+ if (b2 & 0xC0) == 0x80 and (b3 & 0xC0) == 0x80:
+ all_bytes = bytes((b1, b2, b3))
+ try:
+ result += all_bytes.decode()
+ except UnicodeDecodeError:
+ result += "\ufffd" * 3
+
+ i += 6
+ i += 3 # emulate JS for loop statement3
+ continue
+
+ if (b1 & 0xF8) == 0xF0 and (i + 9 < l):
+ # 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ b2 = int(seq[i + 4 : i + 6], 16)
+ b3 = int(seq[i + 7 : i + 9], 16)
+ b4 = int(seq[i + 10 : i + 12], 16)
+
+ if (b2 & 0xC0) == 0x80 and (b3 & 0xC0) == 0x80 and (b4 & 0xC0) == 0x80:
+ all_bytes = bytes((b1, b2, b3, b4))
+ try:
+ result += all_bytes.decode()
+ except UnicodeDecodeError:
+ result += "\ufffd" * 4
+
+ i += 9
+ i += 3 # emulate JS for loop statement3
+ continue
+
+ result += "\ufffd"
+ i += 3 # emulate JS for loop statement3
+
+ return result
diff --git a/lib/mdurl/_encode.py b/lib/mdurl/_encode.py
new file mode 100644
index 0000000..bc2e5b9
--- /dev/null
+++ b/lib/mdurl/_encode.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+
+from collections.abc import Sequence
+from string import ascii_letters, digits, hexdigits
+from urllib.parse import quote as encode_uri_component
+
+ASCII_LETTERS_AND_DIGITS = ascii_letters + digits
+
+ENCODE_DEFAULT_CHARS = ";/?:@&=+$,-_.!~*'()#"
+ENCODE_COMPONENT_CHARS = "-_.!~*'()"
+
+encode_cache: dict[str, list[str]] = {}
+
+
+# Create a lookup array where anything but characters in `chars` string
+# and alphanumeric chars is percent-encoded.
+def get_encode_cache(exclude: str) -> Sequence[str]:
+ if exclude in encode_cache:
+ return encode_cache[exclude]
+
+ cache: list[str] = []
+ encode_cache[exclude] = cache
+
+ for i in range(128):
+ ch = chr(i)
+
+ if ch in ASCII_LETTERS_AND_DIGITS:
+ # always allow unencoded alphanumeric characters
+ cache.append(ch)
+ else:
+ cache.append("%" + ("0" + hex(i)[2:].upper())[-2:])
+
+ for i in range(len(exclude)):
+ cache[ord(exclude[i])] = exclude[i]
+
+ return cache
+
+
+# Encode unsafe characters with percent-encoding, skipping already
+# encoded sequences.
+#
+# - string - string to encode
+# - exclude - list of characters to ignore (in addition to a-zA-Z0-9)
+# - keepEscaped - don't encode '%' in a correct escape sequence (default: true)
+def encode(
+ string: str, exclude: str = ENCODE_DEFAULT_CHARS, *, keep_escaped: bool = True
+) -> str:
+ result = ""
+
+ cache = get_encode_cache(exclude)
+
+ l = len(string) # noqa: E741
+ i = 0
+ while i < l:
+ code = ord(string[i])
+
+ # %
+ if keep_escaped and code == 0x25 and i + 2 < l:
+ if all(c in hexdigits for c in string[i + 1 : i + 3]):
+ result += string[i : i + 3]
+ i += 2
+ i += 1 # JS for loop statement3
+ continue
+
+ if code < 128:
+ result += cache[code]
+ i += 1 # JS for loop statement3
+ continue
+
+ if code >= 0xD800 and code <= 0xDFFF:
+ if code >= 0xD800 and code <= 0xDBFF and i + 1 < l:
+ next_code = ord(string[i + 1])
+ if next_code >= 0xDC00 and next_code <= 0xDFFF:
+ result += encode_uri_component(string[i] + string[i + 1])
+ i += 1
+ i += 1 # JS for loop statement3
+ continue
+ result += "%EF%BF%BD"
+ i += 1 # JS for loop statement3
+ continue
+
+ result += encode_uri_component(string[i])
+ i += 1 # JS for loop statement3
+
+ return result
diff --git a/lib/mdurl/_format.py b/lib/mdurl/_format.py
new file mode 100644
index 0000000..12524ca
--- /dev/null
+++ b/lib/mdurl/_format.py
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from mdurl._url import URL
+
+
+def format(url: URL) -> str: # noqa: A001
+ result = ""
+
+ result += url.protocol or ""
+ result += "//" if url.slashes else ""
+ result += url.auth + "@" if url.auth else ""
+
+ if url.hostname and ":" in url.hostname:
+ # ipv6 address
+ result += "[" + url.hostname + "]"
+ else:
+ result += url.hostname or ""
+
+ result += ":" + url.port if url.port else ""
+ result += url.pathname or ""
+ result += url.search or ""
+ result += url.hash or ""
+
+ return result
diff --git a/lib/mdurl/_parse.py b/lib/mdurl/_parse.py
new file mode 100644
index 0000000..ffeeac7
--- /dev/null
+++ b/lib/mdurl/_parse.py
@@ -0,0 +1,304 @@
+# Copyright Joyent, Inc. and other Node contributors.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the
+# following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
+# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+# USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+# Changes from joyent/node:
+#
+# 1. No leading slash in paths,
+# e.g. in `url.parse('http://foo?bar')` pathname is ``, not `/`
+#
+# 2. Backslashes are not replaced with slashes,
+# so `http:\\example.org\` is treated like a relative path
+#
+# 3. Trailing colon is treated like a part of the path,
+# i.e. in `http://example.org:foo` pathname is `:foo`
+#
+# 4. Nothing is URL-encoded in the resulting object,
+# (in joyent/node some chars in auth and paths are encoded)
+#
+# 5. `url.parse()` does not have `parseQueryString` argument
+#
+# 6. Removed extraneous result properties: `host`, `path`, `query`, etc.,
+# which can be constructed using other parts of the url.
+
+from __future__ import annotations
+
+from collections import defaultdict
+import re
+
+from mdurl._url import URL
+
+# Reference: RFC 3986, RFC 1808, RFC 2396
+
+# define these here so at least they only have to be
+# compiled once on the first module load.
+PROTOCOL_PATTERN = re.compile(r"^([a-z0-9.+-]+:)", flags=re.IGNORECASE)
+PORT_PATTERN = re.compile(r":[0-9]*$")
+
+# Special case for a simple path URL
+SIMPLE_PATH_PATTERN = re.compile(r"^(//?(?!/)[^?\s]*)(\?[^\s]*)?$")
+
+# RFC 2396: characters reserved for delimiting URLs.
+# We actually just auto-escape these.
+DELIMS = ("<", ">", '"', "`", " ", "\r", "\n", "\t")
+
+# RFC 2396: characters not allowed for various reasons.
+UNWISE = ("{", "}", "|", "\\", "^", "`") + DELIMS
+
+# Allowed by RFCs, but cause of XSS attacks. Always escape these.
+AUTO_ESCAPE = ("'",) + UNWISE
+# Characters that are never ever allowed in a hostname.
+# Note that any invalid chars are also handled, but these
+# are the ones that are *expected* to be seen, so we fast-path
+# them.
+NON_HOST_CHARS = ("%", "/", "?", ";", "#") + AUTO_ESCAPE
+HOST_ENDING_CHARS = ("/", "?", "#")
+HOSTNAME_MAX_LEN = 255
+HOSTNAME_PART_PATTERN = re.compile(r"^[+a-z0-9A-Z_-]{0,63}$")
+HOSTNAME_PART_START = re.compile(r"^([+a-z0-9A-Z_-]{0,63})(.*)$")
+# protocols that can allow "unsafe" and "unwise" chars.
+
+# protocols that never have a hostname.
+HOSTLESS_PROTOCOL = defaultdict(
+ bool,
+ {
+ "javascript": True,
+ "javascript:": True,
+ },
+)
+# protocols that always contain a // bit.
+SLASHED_PROTOCOL = defaultdict(
+ bool,
+ {
+ "http": True,
+ "https": True,
+ "ftp": True,
+ "gopher": True,
+ "file": True,
+ "http:": True,
+ "https:": True,
+ "ftp:": True,
+ "gopher:": True,
+ "file:": True,
+ },
+)
+
+
+class MutableURL:
+ def __init__(self) -> None:
+ self.protocol: str | None = None
+ self.slashes: bool = False
+ self.auth: str | None = None
+ self.port: str | None = None
+ self.hostname: str | None = None
+ self.hash: str | None = None
+ self.search: str | None = None
+ self.pathname: str | None = None
+
+ def parse(self, url: str, slashes_denote_host: bool) -> "MutableURL":
+ lower_proto = ""
+ slashes = False
+ rest = url
+
+ # trim before proceeding.
+ # This is to support parsing strings like " http://foo.com \n"
+ rest = rest.strip()
+
+ if not slashes_denote_host and len(url.split("#")) == 1:
+ # Try fast path regexp
+ simple_path = SIMPLE_PATH_PATTERN.match(rest)
+ if simple_path:
+ self.pathname = simple_path.group(1)
+ if simple_path.group(2):
+ self.search = simple_path.group(2)
+ return self
+
+ proto = ""
+ proto_match = PROTOCOL_PATTERN.match(rest)
+ if proto_match:
+ proto = proto_match.group()
+ lower_proto = proto.lower()
+ self.protocol = proto
+ rest = rest[len(proto) :]
+
+ # figure out if it's got a host
+ # user@server is *always* interpreted as a hostname, and url
+ # resolution will treat //foo/bar as host=foo,path=bar because that's
+ # how the browser resolves relative URLs.
+ if slashes_denote_host or proto or re.search(r"^//[^@/]+@[^@/]+", rest):
+ slashes = rest.startswith("//")
+ if slashes and not (proto and HOSTLESS_PROTOCOL[proto]):
+ rest = rest[2:]
+ self.slashes = True
+
+ if not HOSTLESS_PROTOCOL[proto] and (
+ slashes or (proto and not SLASHED_PROTOCOL[proto])
+ ):
+
+ # there's a hostname.
+ # the first instance of /, ?, ;, or # ends the host.
+ #
+ # If there is an @ in the hostname, then non-host chars *are* allowed
+ # to the left of the last @ sign, unless some host-ending character
+ # comes *before* the @-sign.
+ # URLs are obnoxious.
+ #
+ # ex:
+ # http://a@b@c/ => user:a@b host:c
+ # http://a@b?@c => user:a host:c path:/?@c
+
+ # v0.12 TODO(isaacs): This is not quite how Chrome does things.
+ # Review our test case against browsers more comprehensively.
+
+ # find the first instance of any hostEndingChars
+ host_end = -1
+ for i in range(len(HOST_ENDING_CHARS)):
+ hec = rest.find(HOST_ENDING_CHARS[i])
+ if hec != -1 and (host_end == -1 or hec < host_end):
+ host_end = hec
+
+ # at this point, either we have an explicit point where the
+ # auth portion cannot go past, or the last @ char is the decider.
+ if host_end == -1:
+ # atSign can be anywhere.
+ at_sign = rest.rfind("@")
+ else:
+ # atSign must be in auth portion.
+ # http://a@b/c@d => host:b auth:a path:/c@d
+ at_sign = rest.rfind("@", 0, host_end + 1)
+
+ # Now we have a portion which is definitely the auth.
+ # Pull that off.
+ if at_sign != -1:
+ auth = rest[:at_sign]
+ rest = rest[at_sign + 1 :]
+ self.auth = auth
+
+ # the host is the remaining to the left of the first non-host char
+ host_end = -1
+ for i in range(len(NON_HOST_CHARS)):
+ hec = rest.find(NON_HOST_CHARS[i])
+ if hec != -1 and (host_end == -1 or hec < host_end):
+ host_end = hec
+ # if we still have not hit it, then the entire thing is a host.
+ if host_end == -1:
+ host_end = len(rest)
+
+ if host_end > 0 and rest[host_end - 1] == ":":
+ host_end -= 1
+ host = rest[:host_end]
+ rest = rest[host_end:]
+
+ # pull out port.
+ self.parse_host(host)
+
+ # we've indicated that there is a hostname,
+ # so even if it's empty, it has to be present.
+ self.hostname = self.hostname or ""
+
+ # if hostname begins with [ and ends with ]
+ # assume that it's an IPv6 address.
+ ipv6_hostname = self.hostname.startswith("[") and self.hostname.endswith(
+ "]"
+ )
+
+ # validate a little.
+ if not ipv6_hostname:
+ hostparts = self.hostname.split(".")
+ l = len(hostparts) # noqa: E741
+ i = 0
+ while i < l:
+ part = hostparts[i]
+ if not part:
+ i += 1 # emulate statement3 in JS for loop
+ continue
+ if not HOSTNAME_PART_PATTERN.search(part):
+ newpart = ""
+ k = len(part)
+ j = 0
+ while j < k:
+ if ord(part[j]) > 127:
+ # we replace non-ASCII char with a temporary placeholder
+ # we need this to make sure size of hostname is not
+ # broken by replacing non-ASCII by nothing
+ newpart += "x"
+ else:
+ newpart += part[j]
+ j += 1 # emulate statement3 in JS for loop
+
+ # we test again with ASCII char only
+ if not HOSTNAME_PART_PATTERN.search(newpart):
+ valid_parts = hostparts[:i]
+ not_host = hostparts[i + 1 :]
+ bit = HOSTNAME_PART_START.search(part)
+ if bit:
+ valid_parts.append(bit.group(1))
+ not_host.insert(0, bit.group(2))
+ if not_host:
+ rest = ".".join(not_host) + rest
+ self.hostname = ".".join(valid_parts)
+ break
+ i += 1 # emulate statement3 in JS for loop
+
+ if len(self.hostname) > HOSTNAME_MAX_LEN:
+ self.hostname = ""
+
+ # strip [ and ] from the hostname
+ # the host field still retains them, though
+ if ipv6_hostname:
+ self.hostname = self.hostname[1:-1]
+
+ # chop off from the tail first.
+ hash = rest.find("#") # noqa: A001
+ if hash != -1:
+ # got a fragment string.
+ self.hash = rest[hash:]
+ rest = rest[:hash]
+ qm = rest.find("?")
+ if qm != -1:
+ self.search = rest[qm:]
+ rest = rest[:qm]
+ if rest:
+ self.pathname = rest
+ if SLASHED_PROTOCOL[lower_proto] and self.hostname and not self.pathname:
+ self.pathname = ""
+
+ return self
+
+ def parse_host(self, host: str) -> None:
+ port_match = PORT_PATTERN.search(host)
+ if port_match:
+ port = port_match.group()
+ if port != ":":
+ self.port = port[1:]
+ host = host[: -len(port)]
+ if host:
+ self.hostname = host
+
+
+def url_parse(url: URL | str, *, slashes_denote_host: bool = False) -> URL:
+ if isinstance(url, URL):
+ return url
+ u = MutableURL()
+ u.parse(url, slashes_denote_host)
+ return URL(
+ u.protocol, u.slashes, u.auth, u.port, u.hostname, u.hash, u.search, u.pathname
+ )
diff --git a/lib/mdurl/_url.py b/lib/mdurl/_url.py
new file mode 100644
index 0000000..f866e7a
--- /dev/null
+++ b/lib/mdurl/_url.py
@@ -0,0 +1,14 @@
+from __future__ import annotations
+
+from typing import NamedTuple
+
+
+class URL(NamedTuple):
+ protocol: str | None
+ slashes: bool
+ auth: str | None
+ port: str | None
+ hostname: str | None
+ hash: str | None # noqa: A003
+ search: str | None
+ pathname: str | None
diff --git a/lib/mdurl/py.typed b/lib/mdurl/py.typed
new file mode 100644
index 0000000..7632ecf
--- /dev/null
+++ b/lib/mdurl/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561
diff --git a/lib/nacl/__init__.py b/lib/nacl/__init__.py
new file mode 100644
index 0000000..83aaacf
--- /dev/null
+++ b/lib/nacl/__init__.py
@@ -0,0 +1,25 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+__all__ = [
+ "__uri__",
+ "__version__",
+ "__email__",
+]
+
+__uri__ = "https://github.com/pyca/pynacl/"
+
+# Must be kept in sync with `pyproject.toml`
+__version__ = "1.6.2"
diff --git a/lib/nacl/__pycache__/__init__.cpython-314.pyc b/lib/nacl/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..22e167e
Binary files /dev/null and b/lib/nacl/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/encoding.cpython-314.pyc b/lib/nacl/__pycache__/encoding.cpython-314.pyc
new file mode 100644
index 0000000..da4c433
Binary files /dev/null and b/lib/nacl/__pycache__/encoding.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/exceptions.cpython-314.pyc b/lib/nacl/__pycache__/exceptions.cpython-314.pyc
new file mode 100644
index 0000000..6c685f4
Binary files /dev/null and b/lib/nacl/__pycache__/exceptions.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/hash.cpython-314.pyc b/lib/nacl/__pycache__/hash.cpython-314.pyc
new file mode 100644
index 0000000..1989636
Binary files /dev/null and b/lib/nacl/__pycache__/hash.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/hashlib.cpython-314.pyc b/lib/nacl/__pycache__/hashlib.cpython-314.pyc
new file mode 100644
index 0000000..036846e
Binary files /dev/null and b/lib/nacl/__pycache__/hashlib.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/public.cpython-314.pyc b/lib/nacl/__pycache__/public.cpython-314.pyc
new file mode 100644
index 0000000..9573442
Binary files /dev/null and b/lib/nacl/__pycache__/public.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/secret.cpython-314.pyc b/lib/nacl/__pycache__/secret.cpython-314.pyc
new file mode 100644
index 0000000..e0a547c
Binary files /dev/null and b/lib/nacl/__pycache__/secret.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/signing.cpython-314.pyc b/lib/nacl/__pycache__/signing.cpython-314.pyc
new file mode 100644
index 0000000..1eb46a7
Binary files /dev/null and b/lib/nacl/__pycache__/signing.cpython-314.pyc differ
diff --git a/lib/nacl/__pycache__/utils.cpython-314.pyc b/lib/nacl/__pycache__/utils.cpython-314.pyc
new file mode 100644
index 0000000..35aab69
Binary files /dev/null and b/lib/nacl/__pycache__/utils.cpython-314.pyc differ
diff --git a/lib/nacl/_sodium.abi3.so b/lib/nacl/_sodium.abi3.so
new file mode 100755
index 0000000..8e5581b
Binary files /dev/null and b/lib/nacl/_sodium.abi3.so differ
diff --git a/lib/nacl/bindings/__init__.py b/lib/nacl/bindings/__init__.py
new file mode 100644
index 0000000..2e07ba1
--- /dev/null
+++ b/lib/nacl/bindings/__init__.py
@@ -0,0 +1,508 @@
+# Copyright 2013-2019 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from nacl.bindings.crypto_aead import (
+ crypto_aead_aegis128l_ABYTES,
+ crypto_aead_aegis128l_KEYBYTES,
+ crypto_aead_aegis128l_MESSAGEBYTES_MAX,
+ crypto_aead_aegis128l_NPUBBYTES,
+ crypto_aead_aegis128l_NSECBYTES,
+ crypto_aead_aegis128l_decrypt,
+ crypto_aead_aegis128l_encrypt,
+ crypto_aead_aegis256_ABYTES,
+ crypto_aead_aegis256_KEYBYTES,
+ crypto_aead_aegis256_MESSAGEBYTES_MAX,
+ crypto_aead_aegis256_NPUBBYTES,
+ crypto_aead_aegis256_NSECBYTES,
+ crypto_aead_aegis256_decrypt,
+ crypto_aead_aegis256_encrypt,
+ crypto_aead_aes256gcm_ABYTES,
+ crypto_aead_aes256gcm_KEYBYTES,
+ crypto_aead_aes256gcm_MESSAGEBYTES_MAX,
+ crypto_aead_aes256gcm_NPUBBYTES,
+ crypto_aead_aes256gcm_NSECBYTES,
+ crypto_aead_aes256gcm_decrypt,
+ crypto_aead_aes256gcm_encrypt,
+ crypto_aead_chacha20poly1305_ABYTES,
+ crypto_aead_chacha20poly1305_KEYBYTES,
+ crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX,
+ crypto_aead_chacha20poly1305_NPUBBYTES,
+ crypto_aead_chacha20poly1305_NSECBYTES,
+ crypto_aead_chacha20poly1305_decrypt,
+ crypto_aead_chacha20poly1305_encrypt,
+ crypto_aead_chacha20poly1305_ietf_ABYTES,
+ crypto_aead_chacha20poly1305_ietf_KEYBYTES,
+ crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX,
+ crypto_aead_chacha20poly1305_ietf_NPUBBYTES,
+ crypto_aead_chacha20poly1305_ietf_NSECBYTES,
+ crypto_aead_chacha20poly1305_ietf_decrypt,
+ crypto_aead_chacha20poly1305_ietf_encrypt,
+ crypto_aead_xchacha20poly1305_ietf_ABYTES,
+ crypto_aead_xchacha20poly1305_ietf_KEYBYTES,
+ crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX,
+ crypto_aead_xchacha20poly1305_ietf_NPUBBYTES,
+ crypto_aead_xchacha20poly1305_ietf_NSECBYTES,
+ crypto_aead_xchacha20poly1305_ietf_decrypt,
+ crypto_aead_xchacha20poly1305_ietf_encrypt,
+)
+from nacl.bindings.crypto_box import (
+ crypto_box,
+ crypto_box_BEFORENMBYTES,
+ crypto_box_BOXZEROBYTES,
+ crypto_box_NONCEBYTES,
+ crypto_box_PUBLICKEYBYTES,
+ crypto_box_SEALBYTES,
+ crypto_box_SECRETKEYBYTES,
+ crypto_box_SEEDBYTES,
+ crypto_box_ZEROBYTES,
+ crypto_box_afternm,
+ crypto_box_beforenm,
+ crypto_box_easy,
+ crypto_box_easy_afternm,
+ crypto_box_keypair,
+ crypto_box_open,
+ crypto_box_open_afternm,
+ crypto_box_open_easy,
+ crypto_box_open_easy_afternm,
+ crypto_box_seal,
+ crypto_box_seal_open,
+ crypto_box_seed_keypair,
+)
+from nacl.bindings.crypto_core import (
+ crypto_core_ed25519_BYTES,
+ crypto_core_ed25519_NONREDUCEDSCALARBYTES,
+ crypto_core_ed25519_SCALARBYTES,
+ crypto_core_ed25519_add,
+ crypto_core_ed25519_from_uniform,
+ crypto_core_ed25519_is_valid_point,
+ crypto_core_ed25519_scalar_add,
+ crypto_core_ed25519_scalar_complement,
+ crypto_core_ed25519_scalar_invert,
+ crypto_core_ed25519_scalar_mul,
+ crypto_core_ed25519_scalar_negate,
+ crypto_core_ed25519_scalar_reduce,
+ crypto_core_ed25519_scalar_sub,
+ crypto_core_ed25519_sub,
+ has_crypto_core_ed25519,
+)
+from nacl.bindings.crypto_generichash import (
+ crypto_generichash_BYTES,
+ crypto_generichash_BYTES_MAX,
+ crypto_generichash_BYTES_MIN,
+ crypto_generichash_KEYBYTES,
+ crypto_generichash_KEYBYTES_MAX,
+ crypto_generichash_KEYBYTES_MIN,
+ crypto_generichash_PERSONALBYTES,
+ crypto_generichash_SALTBYTES,
+ crypto_generichash_STATEBYTES,
+ generichash_blake2b_final as crypto_generichash_blake2b_final,
+ generichash_blake2b_init as crypto_generichash_blake2b_init,
+ generichash_blake2b_salt_personal as crypto_generichash_blake2b_salt_personal,
+ generichash_blake2b_update as crypto_generichash_blake2b_update,
+)
+from nacl.bindings.crypto_hash import (
+ crypto_hash,
+ crypto_hash_BYTES,
+ crypto_hash_sha256,
+ crypto_hash_sha256_BYTES,
+ crypto_hash_sha512,
+ crypto_hash_sha512_BYTES,
+)
+from nacl.bindings.crypto_kx import (
+ crypto_kx_PUBLIC_KEY_BYTES,
+ crypto_kx_SECRET_KEY_BYTES,
+ crypto_kx_SEED_BYTES,
+ crypto_kx_SESSION_KEY_BYTES,
+ crypto_kx_client_session_keys,
+ crypto_kx_keypair,
+ crypto_kx_seed_keypair,
+ crypto_kx_server_session_keys,
+)
+from nacl.bindings.crypto_pwhash import (
+ crypto_pwhash_ALG_ARGON2I13,
+ crypto_pwhash_ALG_ARGON2ID13,
+ crypto_pwhash_ALG_DEFAULT,
+ crypto_pwhash_BYTES_MAX,
+ crypto_pwhash_BYTES_MIN,
+ crypto_pwhash_PASSWD_MAX,
+ crypto_pwhash_PASSWD_MIN,
+ crypto_pwhash_SALTBYTES,
+ crypto_pwhash_STRBYTES,
+ crypto_pwhash_alg,
+ crypto_pwhash_argon2i_MEMLIMIT_INTERACTIVE,
+ crypto_pwhash_argon2i_MEMLIMIT_MAX,
+ crypto_pwhash_argon2i_MEMLIMIT_MIN,
+ crypto_pwhash_argon2i_MEMLIMIT_MODERATE,
+ crypto_pwhash_argon2i_MEMLIMIT_SENSITIVE,
+ crypto_pwhash_argon2i_OPSLIMIT_INTERACTIVE,
+ crypto_pwhash_argon2i_OPSLIMIT_MAX,
+ crypto_pwhash_argon2i_OPSLIMIT_MIN,
+ crypto_pwhash_argon2i_OPSLIMIT_MODERATE,
+ crypto_pwhash_argon2i_OPSLIMIT_SENSITIVE,
+ crypto_pwhash_argon2i_STRPREFIX,
+ crypto_pwhash_argon2id_MEMLIMIT_INTERACTIVE,
+ crypto_pwhash_argon2id_MEMLIMIT_MAX,
+ crypto_pwhash_argon2id_MEMLIMIT_MIN,
+ crypto_pwhash_argon2id_MEMLIMIT_MODERATE,
+ crypto_pwhash_argon2id_MEMLIMIT_SENSITIVE,
+ crypto_pwhash_argon2id_OPSLIMIT_INTERACTIVE,
+ crypto_pwhash_argon2id_OPSLIMIT_MAX,
+ crypto_pwhash_argon2id_OPSLIMIT_MIN,
+ crypto_pwhash_argon2id_OPSLIMIT_MODERATE,
+ crypto_pwhash_argon2id_OPSLIMIT_SENSITIVE,
+ crypto_pwhash_argon2id_STRPREFIX,
+ crypto_pwhash_scryptsalsa208sha256_BYTES_MAX,
+ crypto_pwhash_scryptsalsa208sha256_BYTES_MIN,
+ crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_INTERACTIVE,
+ crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MAX,
+ crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MIN,
+ crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_SENSITIVE,
+ crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_INTERACTIVE,
+ crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MAX,
+ crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MIN,
+ crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_SENSITIVE,
+ crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX,
+ crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN,
+ crypto_pwhash_scryptsalsa208sha256_SALTBYTES,
+ crypto_pwhash_scryptsalsa208sha256_STRBYTES,
+ crypto_pwhash_scryptsalsa208sha256_STRPREFIX,
+ crypto_pwhash_scryptsalsa208sha256_ll,
+ crypto_pwhash_scryptsalsa208sha256_str,
+ crypto_pwhash_scryptsalsa208sha256_str_verify,
+ crypto_pwhash_str_alg,
+ crypto_pwhash_str_verify,
+ has_crypto_pwhash_scryptsalsa208sha256,
+ nacl_bindings_pick_scrypt_params,
+)
+from nacl.bindings.crypto_scalarmult import (
+ crypto_scalarmult,
+ crypto_scalarmult_BYTES,
+ crypto_scalarmult_SCALARBYTES,
+ crypto_scalarmult_base,
+ crypto_scalarmult_ed25519,
+ crypto_scalarmult_ed25519_BYTES,
+ crypto_scalarmult_ed25519_SCALARBYTES,
+ crypto_scalarmult_ed25519_base,
+ crypto_scalarmult_ed25519_base_noclamp,
+ crypto_scalarmult_ed25519_noclamp,
+ has_crypto_scalarmult_ed25519,
+)
+from nacl.bindings.crypto_secretbox import (
+ crypto_secretbox,
+ crypto_secretbox_BOXZEROBYTES,
+ crypto_secretbox_KEYBYTES,
+ crypto_secretbox_MACBYTES,
+ crypto_secretbox_MESSAGEBYTES_MAX,
+ crypto_secretbox_NONCEBYTES,
+ crypto_secretbox_ZEROBYTES,
+ crypto_secretbox_easy,
+ crypto_secretbox_open,
+ crypto_secretbox_open_easy,
+)
+from nacl.bindings.crypto_secretstream import (
+ crypto_secretstream_xchacha20poly1305_ABYTES,
+ crypto_secretstream_xchacha20poly1305_HEADERBYTES,
+ crypto_secretstream_xchacha20poly1305_KEYBYTES,
+ crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX,
+ crypto_secretstream_xchacha20poly1305_STATEBYTES,
+ crypto_secretstream_xchacha20poly1305_TAG_FINAL,
+ crypto_secretstream_xchacha20poly1305_TAG_MESSAGE,
+ crypto_secretstream_xchacha20poly1305_TAG_PUSH,
+ crypto_secretstream_xchacha20poly1305_TAG_REKEY,
+ crypto_secretstream_xchacha20poly1305_init_pull,
+ crypto_secretstream_xchacha20poly1305_init_push,
+ crypto_secretstream_xchacha20poly1305_keygen,
+ crypto_secretstream_xchacha20poly1305_pull,
+ crypto_secretstream_xchacha20poly1305_push,
+ crypto_secretstream_xchacha20poly1305_rekey,
+ crypto_secretstream_xchacha20poly1305_state,
+)
+from nacl.bindings.crypto_shorthash import (
+ BYTES as crypto_shorthash_siphash24_BYTES,
+ KEYBYTES as crypto_shorthash_siphash24_KEYBYTES,
+ XBYTES as crypto_shorthash_siphashx24_BYTES,
+ XKEYBYTES as crypto_shorthash_siphashx24_KEYBYTES,
+ crypto_shorthash_siphash24,
+ crypto_shorthash_siphashx24,
+ has_crypto_shorthash_siphashx24,
+)
+from nacl.bindings.crypto_sign import (
+ crypto_sign,
+ crypto_sign_BYTES,
+ crypto_sign_PUBLICKEYBYTES,
+ crypto_sign_SECRETKEYBYTES,
+ crypto_sign_SEEDBYTES,
+ crypto_sign_ed25519_pk_to_curve25519,
+ crypto_sign_ed25519_sk_to_curve25519,
+ crypto_sign_ed25519_sk_to_pk,
+ crypto_sign_ed25519_sk_to_seed,
+ crypto_sign_ed25519ph_STATEBYTES,
+ crypto_sign_ed25519ph_final_create,
+ crypto_sign_ed25519ph_final_verify,
+ crypto_sign_ed25519ph_state,
+ crypto_sign_ed25519ph_update,
+ crypto_sign_keypair,
+ crypto_sign_open,
+ crypto_sign_seed_keypair,
+)
+from nacl.bindings.randombytes import (
+ randombytes,
+ randombytes_buf_deterministic,
+)
+from nacl.bindings.sodium_core import sodium_init
+from nacl.bindings.utils import (
+ sodium_add,
+ sodium_increment,
+ sodium_memcmp,
+ sodium_pad,
+ sodium_unpad,
+)
+
+
+__all__ = [
+ "crypto_aead_aegis128l_ABYTES",
+ "crypto_aead_aegis128l_KEYBYTES",
+ "crypto_aead_aegis128l_MESSAGEBYTES_MAX",
+ "crypto_aead_aegis128l_NPUBBYTES",
+ "crypto_aead_aegis128l_NSECBYTES",
+ "crypto_aead_aegis128l_decrypt",
+ "crypto_aead_aegis128l_encrypt",
+ "crypto_aead_aegis256_ABYTES",
+ "crypto_aead_aegis256_KEYBYTES",
+ "crypto_aead_aegis256_MESSAGEBYTES_MAX",
+ "crypto_aead_aegis256_NPUBBYTES",
+ "crypto_aead_aegis256_NSECBYTES",
+ "crypto_aead_aegis256_decrypt",
+ "crypto_aead_aegis256_encrypt",
+ "crypto_aead_aes256gcm_ABYTES",
+ "crypto_aead_aes256gcm_KEYBYTES",
+ "crypto_aead_aes256gcm_MESSAGEBYTES_MAX",
+ "crypto_aead_aes256gcm_NPUBBYTES",
+ "crypto_aead_aes256gcm_NSECBYTES",
+ "crypto_aead_aes256gcm_decrypt",
+ "crypto_aead_aes256gcm_encrypt",
+ "crypto_aead_chacha20poly1305_ABYTES",
+ "crypto_aead_chacha20poly1305_KEYBYTES",
+ "crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX",
+ "crypto_aead_chacha20poly1305_NPUBBYTES",
+ "crypto_aead_chacha20poly1305_NSECBYTES",
+ "crypto_aead_chacha20poly1305_decrypt",
+ "crypto_aead_chacha20poly1305_encrypt",
+ "crypto_aead_chacha20poly1305_ietf_ABYTES",
+ "crypto_aead_chacha20poly1305_ietf_KEYBYTES",
+ "crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX",
+ "crypto_aead_chacha20poly1305_ietf_NPUBBYTES",
+ "crypto_aead_chacha20poly1305_ietf_NSECBYTES",
+ "crypto_aead_chacha20poly1305_ietf_decrypt",
+ "crypto_aead_chacha20poly1305_ietf_encrypt",
+ "crypto_aead_xchacha20poly1305_ietf_ABYTES",
+ "crypto_aead_xchacha20poly1305_ietf_KEYBYTES",
+ "crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX",
+ "crypto_aead_xchacha20poly1305_ietf_NPUBBYTES",
+ "crypto_aead_xchacha20poly1305_ietf_NSECBYTES",
+ "crypto_aead_xchacha20poly1305_ietf_decrypt",
+ "crypto_aead_xchacha20poly1305_ietf_encrypt",
+ "crypto_box_SECRETKEYBYTES",
+ "crypto_box_PUBLICKEYBYTES",
+ "crypto_box_SEEDBYTES",
+ "crypto_box_NONCEBYTES",
+ "crypto_box_ZEROBYTES",
+ "crypto_box_BOXZEROBYTES",
+ "crypto_box_BEFORENMBYTES",
+ "crypto_box_SEALBYTES",
+ "crypto_box_keypair",
+ "crypto_box",
+ "crypto_box_open",
+ "crypto_box_beforenm",
+ "crypto_box_afternm",
+ "crypto_box_open_afternm",
+ "crypto_box_easy",
+ "crypto_box_easy_afternm",
+ "crypto_box_open_easy",
+ "crypto_box_open_easy_afternm",
+ "crypto_box_seal",
+ "crypto_box_seal_open",
+ "crypto_box_seed_keypair",
+ "has_crypto_core_ed25519",
+ "crypto_core_ed25519_BYTES",
+ "crypto_core_ed25519_UNIFORMBYTES",
+ "crypto_core_ed25519_SCALARBYTES",
+ "crypto_core_ed25519_NONREDUCEDSCALARBYTES",
+ "crypto_core_ed25519_add",
+ "crypto_core_ed25519_from_uniform",
+ "crypto_core_ed25519_is_valid_point",
+ "crypto_core_ed25519_sub",
+ "crypto_core_ed25519_scalar_invert",
+ "crypto_core_ed25519_scalar_negate",
+ "crypto_core_ed25519_scalar_complement",
+ "crypto_core_ed25519_scalar_add",
+ "crypto_core_ed25519_scalar_sub",
+ "crypto_core_ed25519_scalar_mul",
+ "crypto_core_ed25519_scalar_reduce",
+ "crypto_hash_BYTES",
+ "crypto_hash_sha256_BYTES",
+ "crypto_hash_sha512_BYTES",
+ "crypto_hash",
+ "crypto_hash_sha256",
+ "crypto_hash_sha512",
+ "crypto_generichash_BYTES",
+ "crypto_generichash_BYTES_MIN",
+ "crypto_generichash_BYTES_MAX",
+ "crypto_generichash_KEYBYTES",
+ "crypto_generichash_KEYBYTES_MIN",
+ "crypto_generichash_KEYBYTES_MAX",
+ "crypto_generichash_SALTBYTES",
+ "crypto_generichash_PERSONALBYTES",
+ "crypto_generichash_STATEBYTES",
+ "crypto_generichash_blake2b_salt_personal",
+ "crypto_generichash_blake2b_init",
+ "crypto_generichash_blake2b_update",
+ "crypto_generichash_blake2b_final",
+ "crypto_kx_keypair",
+ "crypto_kx_seed_keypair",
+ "crypto_kx_client_session_keys",
+ "crypto_kx_server_session_keys",
+ "crypto_kx_PUBLIC_KEY_BYTES",
+ "crypto_kx_SECRET_KEY_BYTES",
+ "crypto_kx_SEED_BYTES",
+ "crypto_kx_SESSION_KEY_BYTES",
+ "has_crypto_scalarmult_ed25519",
+ "crypto_scalarmult_BYTES",
+ "crypto_scalarmult_SCALARBYTES",
+ "crypto_scalarmult",
+ "crypto_scalarmult_base",
+ "crypto_scalarmult_ed25519_BYTES",
+ "crypto_scalarmult_ed25519_SCALARBYTES",
+ "crypto_scalarmult_ed25519",
+ "crypto_scalarmult_ed25519_base",
+ "crypto_scalarmult_ed25519_noclamp",
+ "crypto_scalarmult_ed25519_base_noclamp",
+ "crypto_secretbox_KEYBYTES",
+ "crypto_secretbox_NONCEBYTES",
+ "crypto_secretbox_ZEROBYTES",
+ "crypto_secretbox_BOXZEROBYTES",
+ "crypto_secretbox_MACBYTES",
+ "crypto_secretbox_MESSAGEBYTES_MAX",
+ "crypto_secretbox",
+ "crypto_secretbox_easy",
+ "crypto_secretbox_open",
+ "crypto_secretbox_open_easy",
+ "crypto_secretstream_xchacha20poly1305_ABYTES",
+ "crypto_secretstream_xchacha20poly1305_HEADERBYTES",
+ "crypto_secretstream_xchacha20poly1305_KEYBYTES",
+ "crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX",
+ "crypto_secretstream_xchacha20poly1305_STATEBYTES",
+ "crypto_secretstream_xchacha20poly1305_TAG_FINAL",
+ "crypto_secretstream_xchacha20poly1305_TAG_MESSAGE",
+ "crypto_secretstream_xchacha20poly1305_TAG_PUSH",
+ "crypto_secretstream_xchacha20poly1305_TAG_REKEY",
+ "crypto_secretstream_xchacha20poly1305_init_pull",
+ "crypto_secretstream_xchacha20poly1305_init_push",
+ "crypto_secretstream_xchacha20poly1305_keygen",
+ "crypto_secretstream_xchacha20poly1305_pull",
+ "crypto_secretstream_xchacha20poly1305_push",
+ "crypto_secretstream_xchacha20poly1305_rekey",
+ "crypto_secretstream_xchacha20poly1305_state",
+ "has_crypto_shorthash_siphashx24",
+ "crypto_shorthash_siphash24_BYTES",
+ "crypto_shorthash_siphash24_KEYBYTES",
+ "crypto_shorthash_siphash24",
+ "crypto_shorthash_siphashx24_BYTES",
+ "crypto_shorthash_siphashx24_KEYBYTES",
+ "crypto_shorthash_siphashx24",
+ "crypto_sign_BYTES",
+ "crypto_sign_SEEDBYTES",
+ "crypto_sign_PUBLICKEYBYTES",
+ "crypto_sign_SECRETKEYBYTES",
+ "crypto_sign_keypair",
+ "crypto_sign_seed_keypair",
+ "crypto_sign",
+ "crypto_sign_open",
+ "crypto_sign_ed25519_pk_to_curve25519",
+ "crypto_sign_ed25519_sk_to_curve25519",
+ "crypto_sign_ed25519_sk_to_pk",
+ "crypto_sign_ed25519_sk_to_seed",
+ "crypto_sign_ed25519ph_STATEBYTES",
+ "crypto_sign_ed25519ph_final_create",
+ "crypto_sign_ed25519ph_final_verify",
+ "crypto_sign_ed25519ph_state",
+ "crypto_sign_ed25519ph_update",
+ "crypto_pwhash_ALG_ARGON2I13",
+ "crypto_pwhash_ALG_ARGON2ID13",
+ "crypto_pwhash_ALG_DEFAULT",
+ "crypto_pwhash_BYTES_MAX",
+ "crypto_pwhash_BYTES_MIN",
+ "crypto_pwhash_PASSWD_MAX",
+ "crypto_pwhash_PASSWD_MIN",
+ "crypto_pwhash_SALTBYTES",
+ "crypto_pwhash_STRBYTES",
+ "crypto_pwhash_alg",
+ "crypto_pwhash_argon2i_MEMLIMIT_MIN",
+ "crypto_pwhash_argon2i_MEMLIMIT_MAX",
+ "crypto_pwhash_argon2i_MEMLIMIT_INTERACTIVE",
+ "crypto_pwhash_argon2i_MEMLIMIT_MODERATE",
+ "crypto_pwhash_argon2i_MEMLIMIT_SENSITIVE",
+ "crypto_pwhash_argon2i_OPSLIMIT_MIN",
+ "crypto_pwhash_argon2i_OPSLIMIT_MAX",
+ "crypto_pwhash_argon2i_OPSLIMIT_INTERACTIVE",
+ "crypto_pwhash_argon2i_OPSLIMIT_MODERATE",
+ "crypto_pwhash_argon2i_OPSLIMIT_SENSITIVE",
+ "crypto_pwhash_argon2i_STRPREFIX",
+ "crypto_pwhash_argon2id_MEMLIMIT_MIN",
+ "crypto_pwhash_argon2id_MEMLIMIT_MAX",
+ "crypto_pwhash_argon2id_MEMLIMIT_INTERACTIVE",
+ "crypto_pwhash_argon2id_MEMLIMIT_MODERATE",
+ "crypto_pwhash_argon2id_OPSLIMIT_MIN",
+ "crypto_pwhash_argon2id_OPSLIMIT_MAX",
+ "crypto_pwhash_argon2id_MEMLIMIT_SENSITIVE",
+ "crypto_pwhash_argon2id_OPSLIMIT_INTERACTIVE",
+ "crypto_pwhash_argon2id_OPSLIMIT_MODERATE",
+ "crypto_pwhash_argon2id_OPSLIMIT_SENSITIVE",
+ "crypto_pwhash_argon2id_STRPREFIX",
+ "crypto_pwhash_str_alg",
+ "crypto_pwhash_str_verify",
+ "has_crypto_pwhash_scryptsalsa208sha256",
+ "crypto_pwhash_scryptsalsa208sha256_BYTES_MAX",
+ "crypto_pwhash_scryptsalsa208sha256_BYTES_MIN",
+ "crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_INTERACTIVE",
+ "crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MAX",
+ "crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MIN",
+ "crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_SENSITIVE",
+ "crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_INTERACTIVE",
+ "crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MAX",
+ "crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MIN",
+ "crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_SENSITIVE",
+ "crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX",
+ "crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN",
+ "crypto_pwhash_scryptsalsa208sha256_SALTBYTES",
+ "crypto_pwhash_scryptsalsa208sha256_STRBYTES",
+ "crypto_pwhash_scryptsalsa208sha256_STRPREFIX",
+ "crypto_pwhash_scryptsalsa208sha256_ll",
+ "crypto_pwhash_scryptsalsa208sha256_str",
+ "crypto_pwhash_scryptsalsa208sha256_str_verify",
+ "nacl_bindings_pick_scrypt_params",
+ "randombytes",
+ "randombytes_buf_deterministic",
+ "sodium_init",
+ "sodium_add",
+ "sodium_increment",
+ "sodium_memcmp",
+ "sodium_pad",
+ "sodium_unpad",
+]
+
+
+# Initialize Sodium
+sodium_init()
diff --git a/lib/nacl/bindings/__pycache__/__init__.cpython-314.pyc b/lib/nacl/bindings/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..8a3a7ac
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_aead.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_aead.cpython-314.pyc
new file mode 100644
index 0000000..c50a466
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_aead.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_box.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_box.cpython-314.pyc
new file mode 100644
index 0000000..0b2d923
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_box.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_core.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_core.cpython-314.pyc
new file mode 100644
index 0000000..e7db98c
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_core.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_generichash.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_generichash.cpython-314.pyc
new file mode 100644
index 0000000..6de624a
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_generichash.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_hash.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_hash.cpython-314.pyc
new file mode 100644
index 0000000..2ffe088
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_hash.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_kx.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_kx.cpython-314.pyc
new file mode 100644
index 0000000..8f5c70e
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_kx.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_pwhash.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_pwhash.cpython-314.pyc
new file mode 100644
index 0000000..9b3c545
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_pwhash.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_scalarmult.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_scalarmult.cpython-314.pyc
new file mode 100644
index 0000000..bc729de
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_scalarmult.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_secretbox.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_secretbox.cpython-314.pyc
new file mode 100644
index 0000000..16e5353
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_secretbox.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_secretstream.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_secretstream.cpython-314.pyc
new file mode 100644
index 0000000..616e681
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_secretstream.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_shorthash.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_shorthash.cpython-314.pyc
new file mode 100644
index 0000000..557819f
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_shorthash.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/crypto_sign.cpython-314.pyc b/lib/nacl/bindings/__pycache__/crypto_sign.cpython-314.pyc
new file mode 100644
index 0000000..ce11b07
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/crypto_sign.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/randombytes.cpython-314.pyc b/lib/nacl/bindings/__pycache__/randombytes.cpython-314.pyc
new file mode 100644
index 0000000..7b5f524
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/randombytes.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/sodium_core.cpython-314.pyc b/lib/nacl/bindings/__pycache__/sodium_core.cpython-314.pyc
new file mode 100644
index 0000000..315e647
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/sodium_core.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/__pycache__/utils.cpython-314.pyc b/lib/nacl/bindings/__pycache__/utils.cpython-314.pyc
new file mode 100644
index 0000000..07a160a
Binary files /dev/null and b/lib/nacl/bindings/__pycache__/utils.cpython-314.pyc differ
diff --git a/lib/nacl/bindings/crypto_aead.py b/lib/nacl/bindings/crypto_aead.py
new file mode 100644
index 0000000..2f7da78
--- /dev/null
+++ b/lib/nacl/bindings/crypto_aead.py
@@ -0,0 +1,1069 @@
+# Copyright 2017 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+"""
+Implementations of authenticated encription with associated data (*AEAD*)
+constructions building on the chacha20 stream cipher and the poly1305
+authenticator
+"""
+
# Size constants for each AEAD construction, queried from libsodium at
# import time so they always match the linked library.
crypto_aead_chacha20poly1305_ietf_KEYBYTES: int = (
    lib.crypto_aead_chacha20poly1305_ietf_keybytes()
)
crypto_aead_chacha20poly1305_ietf_NSECBYTES: int = (
    lib.crypto_aead_chacha20poly1305_ietf_nsecbytes()
)
crypto_aead_chacha20poly1305_ietf_NPUBBYTES: int = (
    lib.crypto_aead_chacha20poly1305_ietf_npubbytes()
)
crypto_aead_chacha20poly1305_ietf_ABYTES: int = (
    lib.crypto_aead_chacha20poly1305_ietf_abytes()
)
crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX: int = (
    lib.crypto_aead_chacha20poly1305_ietf_messagebytes_max()
)
# Maximum ciphertext size = maximum message size + authenticator tag.
_aead_chacha20poly1305_ietf_CRYPTBYTES_MAX = (
    crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX
    + crypto_aead_chacha20poly1305_ietf_ABYTES
)

crypto_aead_chacha20poly1305_KEYBYTES: int = (
    lib.crypto_aead_chacha20poly1305_keybytes()
)
crypto_aead_chacha20poly1305_NSECBYTES: int = (
    lib.crypto_aead_chacha20poly1305_nsecbytes()
)
crypto_aead_chacha20poly1305_NPUBBYTES: int = (
    lib.crypto_aead_chacha20poly1305_npubbytes()
)
crypto_aead_chacha20poly1305_ABYTES: int = (
    lib.crypto_aead_chacha20poly1305_abytes()
)
crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX: int = (
    lib.crypto_aead_chacha20poly1305_messagebytes_max()
)
_aead_chacha20poly1305_CRYPTBYTES_MAX = (
    crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX
    + crypto_aead_chacha20poly1305_ABYTES
)

crypto_aead_xchacha20poly1305_ietf_KEYBYTES: int = (
    lib.crypto_aead_xchacha20poly1305_ietf_keybytes()
)
crypto_aead_xchacha20poly1305_ietf_NSECBYTES: int = (
    lib.crypto_aead_xchacha20poly1305_ietf_nsecbytes()
)
crypto_aead_xchacha20poly1305_ietf_NPUBBYTES: int = (
    lib.crypto_aead_xchacha20poly1305_ietf_npubbytes()
)
crypto_aead_xchacha20poly1305_ietf_ABYTES: int = (
    lib.crypto_aead_xchacha20poly1305_ietf_abytes()
)
crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX: int = (
    lib.crypto_aead_xchacha20poly1305_ietf_messagebytes_max()
)
_aead_xchacha20poly1305_ietf_CRYPTBYTES_MAX = (
    crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX
    + crypto_aead_xchacha20poly1305_ietf_ABYTES
)

crypto_aead_aegis256_KEYBYTES: int = lib.crypto_aead_aegis256_keybytes()
crypto_aead_aegis256_NSECBYTES: int = lib.crypto_aead_aegis256_nsecbytes()
crypto_aead_aegis256_NPUBBYTES: int = lib.crypto_aead_aegis256_npubbytes()
crypto_aead_aegis256_ABYTES: int = lib.crypto_aead_aegis256_abytes()
crypto_aead_aegis256_MESSAGEBYTES_MAX: int = (
    lib.crypto_aead_aegis256_messagebytes_max()
)
_aead_aegis256_CRYPTBYTES_MAX = (
    crypto_aead_aegis256_MESSAGEBYTES_MAX + crypto_aead_aegis256_ABYTES
)

crypto_aead_aegis128l_KEYBYTES: int = lib.crypto_aead_aegis128l_keybytes()
crypto_aead_aegis128l_NSECBYTES: int = lib.crypto_aead_aegis128l_nsecbytes()
crypto_aead_aegis128l_NPUBBYTES: int = lib.crypto_aead_aegis128l_npubbytes()
crypto_aead_aegis128l_ABYTES: int = lib.crypto_aead_aegis128l_abytes()
crypto_aead_aegis128l_MESSAGEBYTES_MAX: int = (
    lib.crypto_aead_aegis128l_messagebytes_max()
)
# Bug fix: this assignment previously clobbered
# ``_aead_aegis256_CRYPTBYTES_MAX`` (copy-paste error) instead of
# defining the AEGIS-128L bound under its own name.
_aead_aegis128l_CRYPTBYTES_MAX = (
    crypto_aead_aegis128l_MESSAGEBYTES_MAX + crypto_aead_aegis128l_ABYTES
)

crypto_aead_aes256gcm_KEYBYTES: int = lib.crypto_aead_aes256gcm_keybytes()
crypto_aead_aes256gcm_NSECBYTES: int = lib.crypto_aead_aes256gcm_nsecbytes()
crypto_aead_aes256gcm_NPUBBYTES: int = lib.crypto_aead_aes256gcm_npubbytes()
crypto_aead_aes256gcm_ABYTES: int = lib.crypto_aead_aes256gcm_abytes()
crypto_aead_aes256gcm_MESSAGEBYTES_MAX: int = (
    lib.crypto_aead_aes256gcm_messagebytes_max()
)
# Bug fix: likewise previously assigned to
# ``_aead_aegis256_CRYPTBYTES_MAX`` by mistake.
_aead_aes256gcm_CRYPTBYTES_MAX = (
    crypto_aead_aes256gcm_MESSAGEBYTES_MAX + crypto_aead_aes256gcm_ABYTES
)
+
+
def crypto_aead_chacha20poly1305_ietf_encrypt(
    message: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Encrypt ``message`` with the IETF ratified chacha20poly1305
    construction described in RFC7539, authenticating the optional
    ``aad`` alongside it.

    :param message: plaintext to encrypt
    :type message: bytes
    :param aad: additional authenticated (but unencrypted) data
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: authenticated ciphertext
    :rtype: bytes
    """
    # Validate every argument up front; the C call assumes exact sizes.
    ensure(
        isinstance(message, bytes),
        "Input message type must be bytes",
        raising=exc.TypeError,
    )

    msg_len = len(message)

    ensure(
        msg_len <= crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX,
        f"Message must be at most "
        f"{crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_chacha20poly1305_ietf_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_chacha20poly1305_ietf_NPUBBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes)
        and len(key) == crypto_aead_chacha20poly1305_ietf_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_chacha20poly1305_ietf_KEYBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    out_max = msg_len + crypto_aead_chacha20poly1305_ietf_ABYTES
    clen = ffi.new("unsigned long long *")
    ciphertext = ffi.new("unsigned char[]", out_max)

    rc = lib.crypto_aead_chacha20poly1305_ietf_encrypt(
        ciphertext, clen, message, msg_len, _aad, aalen, ffi.NULL, nonce, key
    )
    ensure(rc == 0, "Encryption failed.", raising=exc.CryptoError)

    return ffi.buffer(ciphertext, clen[0])[:]
+
+
def crypto_aead_chacha20poly1305_ietf_decrypt(
    ciphertext: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Decrypt ``ciphertext`` produced by the IETF ratified chacha20poly1305
    construction described in RFC7539, verifying the optional ``aad``.

    :param ciphertext: authenticated ciphertext
    :type ciphertext: bytes
    :param aad: additional authenticated data supplied at encryption time
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: decrypted message
    :rtype: bytes
    """
    ensure(
        isinstance(ciphertext, bytes),
        "Input ciphertext type must be bytes",
        raising=exc.TypeError,
    )

    ct_len = len(ciphertext)

    ensure(
        ct_len <= _aead_chacha20poly1305_ietf_CRYPTBYTES_MAX,
        f"Ciphertext must be at most "
        f"{_aead_chacha20poly1305_ietf_CRYPTBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_chacha20poly1305_ietf_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_chacha20poly1305_ietf_NPUBBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes)
        and len(key) == crypto_aead_chacha20poly1305_ietf_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_chacha20poly1305_ietf_KEYBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    # The plaintext is always exactly ABYTES shorter than the ciphertext.
    out_max = ct_len - crypto_aead_chacha20poly1305_ietf_ABYTES
    mlen = ffi.new("unsigned long long *")
    message = ffi.new("unsigned char[]", out_max)

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    rc = lib.crypto_aead_chacha20poly1305_ietf_decrypt(
        message, mlen, ffi.NULL, ciphertext, ct_len, _aad, aalen, nonce, key
    )
    ensure(rc == 0, "Decryption failed.", raising=exc.CryptoError)

    return ffi.buffer(message, mlen[0])[:]
+
+
def crypto_aead_chacha20poly1305_encrypt(
    message: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Encrypt the given ``message`` using the "legacy" construction
    described in draft-agl-tls-chacha20poly1305.

    :param message: plaintext to encrypt
    :type message: bytes
    :param aad: additional authenticated (but unencrypted) data
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: authenticated ciphertext
    :rtype: bytes
    """
    ensure(
        isinstance(message, bytes),
        "Input message type must be bytes",
        raising=exc.TypeError,
    )

    mlen = len(message)

    ensure(
        mlen <= crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX,
        "Message must be at most {} bytes long".format(
            crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX
        ),
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_chacha20poly1305_NPUBBYTES,
        "Nonce must be a {} bytes long bytes sequence".format(
            crypto_aead_chacha20poly1305_NPUBBYTES
        ),
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes)
        and len(key) == crypto_aead_chacha20poly1305_KEYBYTES,
        "Key must be a {} bytes long bytes sequence".format(
            crypto_aead_chacha20poly1305_KEYBYTES
        ),
        raising=exc.TypeError,
    )

    # libsodium treats a NULL pointer with zero length as "no aad".
    if aad:
        _aad = aad
        aalen = len(aad)
    else:
        _aad = ffi.NULL
        aalen = 0

    # Bug fix: use the legacy construction's own tag size rather than the
    # IETF one (they are currently equal in libsodium, but mixing the
    # constants is wrong and would break if they ever diverged).
    mxout = mlen + crypto_aead_chacha20poly1305_ABYTES

    clen = ffi.new("unsigned long long *")

    ciphertext = ffi.new("unsigned char[]", mxout)

    res = lib.crypto_aead_chacha20poly1305_encrypt(
        ciphertext, clen, message, mlen, _aad, aalen, ffi.NULL, nonce, key
    )

    ensure(res == 0, "Encryption failed.", raising=exc.CryptoError)
    return ffi.buffer(ciphertext, clen[0])[:]
+
+
def crypto_aead_chacha20poly1305_decrypt(
    ciphertext: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Decrypt ``ciphertext`` produced by the "legacy" construction
    described in draft-agl-tls-chacha20poly1305.

    :param ciphertext: authenticated ciphertext
    :type ciphertext: bytes
    :param aad: additional authenticated data supplied at encryption time
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: decrypted message
    :rtype: bytes
    """
    ensure(
        isinstance(ciphertext, bytes),
        "Input ciphertext type must be bytes",
        raising=exc.TypeError,
    )

    ct_len = len(ciphertext)

    ensure(
        ct_len <= _aead_chacha20poly1305_CRYPTBYTES_MAX,
        f"Ciphertext must be at most "
        f"{_aead_chacha20poly1305_CRYPTBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_chacha20poly1305_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_chacha20poly1305_NPUBBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes)
        and len(key) == crypto_aead_chacha20poly1305_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_chacha20poly1305_KEYBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    # The plaintext is always exactly ABYTES shorter than the ciphertext.
    out_max = ct_len - crypto_aead_chacha20poly1305_ABYTES
    mlen = ffi.new("unsigned long long *")
    message = ffi.new("unsigned char[]", out_max)

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    rc = lib.crypto_aead_chacha20poly1305_decrypt(
        message, mlen, ffi.NULL, ciphertext, ct_len, _aad, aalen, nonce, key
    )
    ensure(rc == 0, "Decryption failed.", raising=exc.CryptoError)

    return ffi.buffer(message, mlen[0])[:]
+
+
def crypto_aead_xchacha20poly1305_ietf_encrypt(
    message: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Encrypt ``message`` with the long-nonces xchacha20poly1305
    construction, authenticating the optional ``aad`` alongside it.

    :param message: plaintext to encrypt
    :type message: bytes
    :param aad: additional authenticated (but unencrypted) data
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: authenticated ciphertext
    :rtype: bytes
    """
    # Validate every argument up front; the C call assumes exact sizes.
    ensure(
        isinstance(message, bytes),
        "Input message type must be bytes",
        raising=exc.TypeError,
    )

    msg_len = len(message)

    ensure(
        msg_len <= crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX,
        f"Message must be at most "
        f"{crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_xchacha20poly1305_ietf_NPUBBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes)
        and len(key) == crypto_aead_xchacha20poly1305_ietf_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_xchacha20poly1305_ietf_KEYBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    out_max = msg_len + crypto_aead_xchacha20poly1305_ietf_ABYTES
    clen = ffi.new("unsigned long long *")
    ciphertext = ffi.new("unsigned char[]", out_max)

    rc = lib.crypto_aead_xchacha20poly1305_ietf_encrypt(
        ciphertext, clen, message, msg_len, _aad, aalen, ffi.NULL, nonce, key
    )
    ensure(rc == 0, "Encryption failed.", raising=exc.CryptoError)

    return ffi.buffer(ciphertext, clen[0])[:]
+
+
def crypto_aead_xchacha20poly1305_ietf_decrypt(
    ciphertext: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Decrypt ``ciphertext`` produced by the long-nonces xchacha20poly1305
    construction, verifying the optional ``aad``.

    :param ciphertext: authenticated ciphertext
    :type ciphertext: bytes
    :param aad: additional authenticated data supplied at encryption time
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: decrypted message
    :rtype: bytes
    """
    ensure(
        isinstance(ciphertext, bytes),
        "Input ciphertext type must be bytes",
        raising=exc.TypeError,
    )

    ct_len = len(ciphertext)

    ensure(
        ct_len <= _aead_xchacha20poly1305_ietf_CRYPTBYTES_MAX,
        f"Ciphertext must be at most "
        f"{_aead_xchacha20poly1305_ietf_CRYPTBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_xchacha20poly1305_ietf_NPUBBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes)
        and len(key) == crypto_aead_xchacha20poly1305_ietf_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_xchacha20poly1305_ietf_KEYBYTES} bytes long"
        f" bytes sequence",
        raising=exc.TypeError,
    )

    # The plaintext is always exactly ABYTES shorter than the ciphertext.
    out_max = ct_len - crypto_aead_xchacha20poly1305_ietf_ABYTES
    mlen = ffi.new("unsigned long long *")
    message = ffi.new("unsigned char[]", out_max)

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    rc = lib.crypto_aead_xchacha20poly1305_ietf_decrypt(
        message, mlen, ffi.NULL, ciphertext, ct_len, _aad, aalen, nonce, key
    )
    ensure(rc == 0, "Decryption failed.", raising=exc.CryptoError)

    return ffi.buffer(message, mlen[0])[:]
+
+
def crypto_aead_aegis256_encrypt(
    message: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Encrypt ``message`` with the AEGIS-256 construction, authenticating
    the optional ``aad`` alongside it.

    :param message: plaintext to encrypt
    :type message: bytes
    :param aad: additional authenticated (but unencrypted) data
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: authenticated ciphertext
    :rtype: bytes
    """
    # Validate every argument up front; the C call assumes exact sizes.
    ensure(
        isinstance(message, bytes),
        "Input message type must be bytes",
        raising=exc.TypeError,
    )

    msg_len = len(message)

    ensure(
        msg_len <= crypto_aead_aegis256_MESSAGEBYTES_MAX,
        f"Message must be at most "
        f"{crypto_aead_aegis256_MESSAGEBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_aegis256_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_aegis256_NPUBBYTES} bytes long bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes) and len(key) == crypto_aead_aegis256_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_aegis256_KEYBYTES} bytes long bytes sequence",
        raising=exc.TypeError,
    )

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    out_max = msg_len + crypto_aead_aegis256_ABYTES
    clen = ffi.new("unsigned long long *")
    ciphertext = ffi.new("unsigned char[]", out_max)

    rc = lib.crypto_aead_aegis256_encrypt(
        ciphertext, clen, message, msg_len, _aad, aalen, ffi.NULL, nonce, key
    )
    ensure(rc == 0, "Encryption failed.", raising=exc.CryptoError)

    return ffi.buffer(ciphertext, clen[0])[:]
+
+
def crypto_aead_aegis256_decrypt(
    ciphertext: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Decrypt the given ``ciphertext`` using the AEGIS-256
    construction.

    :param ciphertext: authenticated ciphertext
    :type ciphertext: bytes
    :param aad: additional authenticated data supplied at encryption time
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: message
    :rtype: bytes
    """
    ensure(
        isinstance(ciphertext, bytes),
        "Input ciphertext type must be bytes",
        raising=exc.TypeError,
    )

    clen = len(ciphertext)

    # Bug fix: compute the AEGIS-256 bound locally; the module-level
    # ``_aead_aegis256_CRYPTBYTES_MAX`` constant is clobbered by later
    # copy-pasted assignments and holds an unrelated value at import time.
    _cryptbytes_max = (
        crypto_aead_aegis256_MESSAGEBYTES_MAX + crypto_aead_aegis256_ABYTES
    )
    ensure(
        clen <= _cryptbytes_max,
        "Ciphertext must be at most {} bytes long".format(_cryptbytes_max),
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_aegis256_NPUBBYTES,
        "Nonce must be a {} bytes long bytes sequence".format(
            crypto_aead_aegis256_NPUBBYTES
        ),
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes) and len(key) == crypto_aead_aegis256_KEYBYTES,
        "Key must be a {} bytes long bytes sequence".format(
            crypto_aead_aegis256_KEYBYTES
        ),
        raising=exc.TypeError,
    )

    # The plaintext is always exactly ABYTES shorter than the ciphertext.
    mxout = clen - crypto_aead_aegis256_ABYTES
    mlen = ffi.new("unsigned long long *")
    message = ffi.new("unsigned char[]", mxout)

    # libsodium treats a NULL pointer with zero length as "no aad".
    if aad:
        _aad = aad
        aalen = len(aad)
    else:
        _aad = ffi.NULL
        aalen = 0

    res = lib.crypto_aead_aegis256_decrypt(
        message, mlen, ffi.NULL, ciphertext, clen, _aad, aalen, nonce, key
    )

    ensure(res == 0, "Decryption failed.", raising=exc.CryptoError)

    return ffi.buffer(message, mlen[0])[:]
+
+
def crypto_aead_aegis128l_encrypt(
    message: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Encrypt ``message`` with the AEGIS-128L construction, authenticating
    the optional ``aad`` alongside it.

    :param message: plaintext to encrypt
    :type message: bytes
    :param aad: additional authenticated (but unencrypted) data
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: authenticated ciphertext
    :rtype: bytes
    """
    # Validate every argument up front; the C call assumes exact sizes.
    ensure(
        isinstance(message, bytes),
        "Input message type must be bytes",
        raising=exc.TypeError,
    )

    msg_len = len(message)

    ensure(
        msg_len <= crypto_aead_aegis128l_MESSAGEBYTES_MAX,
        f"Message must be at most "
        f"{crypto_aead_aegis128l_MESSAGEBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_aegis128l_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_aegis128l_NPUBBYTES} bytes long bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes) and len(key) == crypto_aead_aegis128l_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_aegis128l_KEYBYTES} bytes long bytes sequence",
        raising=exc.TypeError,
    )

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    out_max = msg_len + crypto_aead_aegis128l_ABYTES
    clen = ffi.new("unsigned long long *")
    ciphertext = ffi.new("unsigned char[]", out_max)

    rc = lib.crypto_aead_aegis128l_encrypt(
        ciphertext, clen, message, msg_len, _aad, aalen, ffi.NULL, nonce, key
    )
    ensure(rc == 0, "Encryption failed.", raising=exc.CryptoError)

    return ffi.buffer(ciphertext, clen[0])[:]
+
+
def crypto_aead_aegis128l_decrypt(
    ciphertext: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Decrypt the given ``ciphertext`` using the AEGIS-128L
    construction.

    :param ciphertext: authenticated ciphertext
    :type ciphertext: bytes
    :param aad: additional authenticated data supplied at encryption time
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: message
    :rtype: bytes
    """
    ensure(
        isinstance(ciphertext, bytes),
        "Input ciphertext type must be bytes",
        raising=exc.TypeError,
    )

    clen = len(ciphertext)

    # Bug fix: the bound must be the AEGIS-128L limit; this function
    # previously compared against ``_aead_aegis256_CRYPTBYTES_MAX``, a
    # constant belonging to a different construction (and itself clobbered
    # by copy-pasted module-level assignments).
    _cryptbytes_max = (
        crypto_aead_aegis128l_MESSAGEBYTES_MAX + crypto_aead_aegis128l_ABYTES
    )
    ensure(
        clen <= _cryptbytes_max,
        "Ciphertext must be at most {} bytes long".format(_cryptbytes_max),
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_aegis128l_NPUBBYTES,
        "Nonce must be a {} bytes long bytes sequence".format(
            crypto_aead_aegis128l_NPUBBYTES
        ),
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes) and len(key) == crypto_aead_aegis128l_KEYBYTES,
        "Key must be a {} bytes long bytes sequence".format(
            crypto_aead_aegis128l_KEYBYTES
        ),
        raising=exc.TypeError,
    )

    # The plaintext is always exactly ABYTES shorter than the ciphertext.
    mxout = clen - crypto_aead_aegis128l_ABYTES
    mlen = ffi.new("unsigned long long *")
    message = ffi.new("unsigned char[]", mxout)

    # libsodium treats a NULL pointer with zero length as "no aad".
    if aad:
        _aad = aad
        aalen = len(aad)
    else:
        _aad = ffi.NULL
        aalen = 0

    res = lib.crypto_aead_aegis128l_decrypt(
        message, mlen, ffi.NULL, ciphertext, clen, _aad, aalen, nonce, key
    )

    ensure(res == 0, "Decryption failed.", raising=exc.CryptoError)

    return ffi.buffer(message, mlen[0])[:]
+
+
def crypto_aead_aes256gcm_encrypt(
    message: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Encrypt ``message`` with the AES-256-GCM construction, authenticating
    the optional ``aad`` alongside it. Requires the Intel AES-NI
    extensions, or the ARM Crypto extensions.

    :param message: plaintext to encrypt
    :type message: bytes
    :param aad: additional authenticated (but unencrypted) data
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: authenticated ciphertext
    :rtype: bytes
    """
    # AES-256-GCM in libsodium is only usable with hardware support.
    ensure(
        lib.crypto_aead_aes256gcm_is_available() == 1,
        "Construction requires hardware acceleration",
        raising=exc.UnavailableError,
    )

    # Validate every argument up front; the C call assumes exact sizes.
    ensure(
        isinstance(message, bytes),
        "Input message type must be bytes",
        raising=exc.TypeError,
    )

    msg_len = len(message)

    ensure(
        msg_len <= crypto_aead_aes256gcm_MESSAGEBYTES_MAX,
        f"Message must be at most "
        f"{crypto_aead_aes256gcm_MESSAGEBYTES_MAX} bytes long",
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_aes256gcm_NPUBBYTES,
        f"Nonce must be a "
        f"{crypto_aead_aes256gcm_NPUBBYTES} bytes long bytes sequence",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes) and len(key) == crypto_aead_aes256gcm_KEYBYTES,
        f"Key must be a "
        f"{crypto_aead_aes256gcm_KEYBYTES} bytes long bytes sequence",
        raising=exc.TypeError,
    )

    # libsodium treats a NULL pointer with zero length as "no aad".
    _aad = aad if aad else ffi.NULL
    aalen = len(aad) if aad else 0

    out_max = msg_len + crypto_aead_aes256gcm_ABYTES
    clen = ffi.new("unsigned long long *")
    ciphertext = ffi.new("unsigned char[]", out_max)

    rc = lib.crypto_aead_aes256gcm_encrypt(
        ciphertext, clen, message, msg_len, _aad, aalen, ffi.NULL, nonce, key
    )
    ensure(rc == 0, "Encryption failed.", raising=exc.CryptoError)

    return ffi.buffer(ciphertext, clen[0])[:]
+
+
def crypto_aead_aes256gcm_decrypt(
    ciphertext: bytes, aad: Optional[bytes], nonce: bytes, key: bytes
) -> bytes:
    """
    Decrypt the given ``ciphertext`` using the AES-256-GCM
    construction. Requires the Intel AES-NI extensions,
    or the ARM Crypto extensions.

    :param ciphertext: authenticated ciphertext
    :type ciphertext: bytes
    :param aad: additional authenticated data supplied at encryption time
    :type aad: Optional[bytes]
    :param nonce: public nonce
    :type nonce: bytes
    :param key: secret key
    :type key: bytes
    :return: message
    :rtype: bytes
    """
    # AES-256-GCM in libsodium is only usable with hardware support.
    ensure(
        lib.crypto_aead_aes256gcm_is_available() == 1,
        "Construction requires hardware acceleration",
        raising=exc.UnavailableError,
    )

    ensure(
        isinstance(ciphertext, bytes),
        "Input ciphertext type must be bytes",
        raising=exc.TypeError,
    )

    clen = len(ciphertext)

    # Bug fix: the bound must be the AES-256-GCM limit; this function
    # previously compared against ``_aead_aegis256_CRYPTBYTES_MAX``, a
    # constant belonging to a different construction.
    _cryptbytes_max = (
        crypto_aead_aes256gcm_MESSAGEBYTES_MAX + crypto_aead_aes256gcm_ABYTES
    )
    ensure(
        clen <= _cryptbytes_max,
        "Ciphertext must be at most {} bytes long".format(_cryptbytes_max),
        raising=exc.ValueError,
    )

    ensure(
        isinstance(aad, bytes) or (aad is None),
        "Additional data must be bytes or None",
        raising=exc.TypeError,
    )

    ensure(
        isinstance(nonce, bytes)
        and len(nonce) == crypto_aead_aes256gcm_NPUBBYTES,
        "Nonce must be a {} bytes long bytes sequence".format(
            crypto_aead_aes256gcm_NPUBBYTES
        ),
        raising=exc.TypeError,
    )

    ensure(
        isinstance(key, bytes) and len(key) == crypto_aead_aes256gcm_KEYBYTES,
        "Key must be a {} bytes long bytes sequence".format(
            crypto_aead_aes256gcm_KEYBYTES
        ),
        raising=exc.TypeError,
    )

    # The plaintext is always exactly ABYTES shorter than the ciphertext.
    mxout = clen - crypto_aead_aes256gcm_ABYTES
    mlen = ffi.new("unsigned long long *")
    message = ffi.new("unsigned char[]", mxout)

    # libsodium treats a NULL pointer with zero length as "no aad".
    if aad:
        _aad = aad
        aalen = len(aad)
    else:
        _aad = ffi.NULL
        aalen = 0

    res = lib.crypto_aead_aes256gcm_decrypt(
        message, mlen, ffi.NULL, ciphertext, clen, _aad, aalen, nonce, key
    )

    ensure(res == 0, "Decryption failed.", raising=exc.CryptoError)

    return ffi.buffer(message, mlen[0])[:]
diff --git a/lib/nacl/bindings/crypto_box.py b/lib/nacl/bindings/crypto_box.py
new file mode 100644
index 0000000..da6e4cb
--- /dev/null
+++ b/lib/nacl/bindings/crypto_box.py
@@ -0,0 +1,475 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+__all__ = ["crypto_box_keypair", "crypto_box"]  # NOTE(review): only two names exported though many helpers below are public; matches upstream PyNaCl -- confirm intent
+
+
+crypto_box_SECRETKEYBYTES: int = lib.crypto_box_secretkeybytes()  # all sizes queried from libsodium at import time
+crypto_box_PUBLICKEYBYTES: int = lib.crypto_box_publickeybytes()
+crypto_box_SEEDBYTES: int = lib.crypto_box_seedbytes()
+crypto_box_NONCEBYTES: int = lib.crypto_box_noncebytes()
+crypto_box_ZEROBYTES: int = lib.crypto_box_zerobytes()  # leading zero padding for the legacy NaCl message API
+crypto_box_BOXZEROBYTES: int = lib.crypto_box_boxzerobytes()  # leading zero padding for the legacy ciphertext API
+crypto_box_BEFORENMBYTES: int = lib.crypto_box_beforenmbytes()  # size of a precomputed shared key
+crypto_box_SEALBYTES: int = lib.crypto_box_sealbytes()  # sealed-box overhead: ephemeral pk + MAC
+crypto_box_MACBYTES: int = lib.crypto_box_macbytes()  # per-message authenticator size for the *_easy API
+
+
+def crypto_box_keypair() -> Tuple[bytes, bytes]:
+    """
+    Returns a randomly generated public and secret key.
+
+    :rtype: (bytes(public_key), bytes(secret_key))
+    """
+    pk = ffi.new("unsigned char[]", crypto_box_PUBLICKEYBYTES)  # output buffer filled by libsodium
+    sk = ffi.new("unsigned char[]", crypto_box_SECRETKEYBYTES)  # output buffer filled by libsodium
+
+    rc = lib.crypto_box_keypair(pk, sk)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return (
+        ffi.buffer(pk, crypto_box_PUBLICKEYBYTES)[:],  # [:] copies the cffi buffer into immutable bytes
+        ffi.buffer(sk, crypto_box_SECRETKEYBYTES)[:],
+    )
+
+
+def crypto_box_seed_keypair(seed: bytes) -> Tuple[bytes, bytes]:
+    """
+    Returns a (public, secret) key pair deterministically generated
+    from an input ``seed``.
+
+    .. warning:: The seed **must** be high-entropy; therefore,
+        its generator **must** be a cryptographic quality
+        random function like, for example, :func:`~nacl.utils.random`.
+
+    .. warning:: The seed **must** be protected and remain secret.
+        Anyone who knows the seed is really in possession of
+        the corresponding PrivateKey.
+
+
+    :param seed: bytes
+    :rtype: (bytes(public_key), bytes(secret_key))
+    """
+    ensure(isinstance(seed, bytes), "seed must be bytes", raising=TypeError)  # plain TypeError here, not exc.TypeError -- matches upstream
+
+    if len(seed) != crypto_box_SEEDBYTES:
+        raise exc.ValueError("Invalid seed")
+
+    pk = ffi.new("unsigned char[]", crypto_box_PUBLICKEYBYTES)
+    sk = ffi.new("unsigned char[]", crypto_box_SECRETKEYBYTES)
+
+    rc = lib.crypto_box_seed_keypair(pk, sk, seed)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return (
+        ffi.buffer(pk, crypto_box_PUBLICKEYBYTES)[:],
+        ffi.buffer(sk, crypto_box_SECRETKEYBYTES)[:],
+    )
+
+
+def crypto_box(message: bytes, nonce: bytes, pk: bytes, sk: bytes) -> bytes:
+    """
+    Encrypts and returns a message ``message`` using the secret key ``sk``,
+    public key ``pk``, and the nonce ``nonce``.
+
+    :param message: bytes
+    :param nonce: bytes
+    :param pk: bytes
+    :param sk: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce size")
+
+    if len(pk) != crypto_box_PUBLICKEYBYTES:
+        raise exc.ValueError("Invalid public key")
+
+    if len(sk) != crypto_box_SECRETKEYBYTES:
+        raise exc.ValueError("Invalid secret key")
+
+    padded = (b"\x00" * crypto_box_ZEROBYTES) + message  # legacy NaCl API: message must carry ZEROBYTES of leading zeros
+    ciphertext = ffi.new("unsigned char[]", len(padded))
+
+    rc = lib.crypto_box(ciphertext, padded, len(padded), nonce, pk, sk)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return ffi.buffer(ciphertext, len(padded))[crypto_box_BOXZEROBYTES:]  # strip the BOXZEROBYTES zero prefix the C API writes
+
+
+def crypto_box_open(
+    ciphertext: bytes, nonce: bytes, pk: bytes, sk: bytes
+) -> bytes:
+    """
+    Decrypts and returns an encrypted message ``ciphertext``, using the secret
+    key ``sk``, public key ``pk``, and the nonce ``nonce``.
+
+    :param ciphertext: bytes
+    :param nonce: bytes
+    :param pk: bytes
+    :param sk: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce size")
+
+    if len(pk) != crypto_box_PUBLICKEYBYTES:
+        raise exc.ValueError("Invalid public key")
+
+    if len(sk) != crypto_box_SECRETKEYBYTES:
+        raise exc.ValueError("Invalid secret key")
+
+    padded = (b"\x00" * crypto_box_BOXZEROBYTES) + ciphertext  # legacy API: ciphertext must carry BOXZEROBYTES of leading zeros
+    plaintext = ffi.new("unsigned char[]", len(padded))
+
+    res = lib.crypto_box_open(plaintext, padded, len(padded), nonce, pk, sk)
+    ensure(
+        res == 0,
+        "An error occurred trying to decrypt the message",  # nonzero means MAC verification failed (or malformed input)
+        raising=exc.CryptoError,
+    )
+
+    return ffi.buffer(plaintext, len(padded))[crypto_box_ZEROBYTES:]  # strip the ZEROBYTES zero prefix of the recovered message
+
+
+def crypto_box_beforenm(pk: bytes, sk: bytes) -> bytes:
+    """
+    Computes and returns the shared key for the public key ``pk`` and the
+    secret key ``sk``. This can be used to speed up operations where the same
+    set of keys is going to be used multiple times.
+
+    :param pk: bytes
+    :param sk: bytes
+    :rtype: bytes
+    """
+    if len(pk) != crypto_box_PUBLICKEYBYTES:
+        raise exc.ValueError("Invalid public key")
+
+    if len(sk) != crypto_box_SECRETKEYBYTES:
+        raise exc.ValueError("Invalid secret key")
+
+    k = ffi.new("unsigned char[]", crypto_box_BEFORENMBYTES)  # output: precomputed shared key for the *_afternm calls
+
+    rc = lib.crypto_box_beforenm(k, pk, sk)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return ffi.buffer(k, crypto_box_BEFORENMBYTES)[:]
+
+
+def crypto_box_afternm(message: bytes, nonce: bytes, k: bytes) -> bytes:
+    """
+    Encrypts and returns the message ``message`` using the shared key ``k`` and
+    the nonce ``nonce``.
+
+    :param message: bytes
+    :param nonce: bytes
+    :param k: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce")
+
+    if len(k) != crypto_box_BEFORENMBYTES:
+        raise exc.ValueError("Invalid shared key")
+
+    padded = b"\x00" * crypto_box_ZEROBYTES + message  # legacy API zero-padding, same scheme as crypto_box()
+    ciphertext = ffi.new("unsigned char[]", len(padded))
+
+    rc = lib.crypto_box_afternm(ciphertext, padded, len(padded), nonce, k)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return ffi.buffer(ciphertext, len(padded))[crypto_box_BOXZEROBYTES:]
+
+
+def crypto_box_open_afternm(
+    ciphertext: bytes, nonce: bytes, k: bytes
+) -> bytes:
+    """
+    Decrypts and returns the encrypted message ``ciphertext``, using the shared
+    key ``k`` and the nonce ``nonce``.
+
+    :param ciphertext: bytes
+    :param nonce: bytes
+    :param k: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce")
+
+    if len(k) != crypto_box_BEFORENMBYTES:
+        raise exc.ValueError("Invalid shared key")
+
+    padded = (b"\x00" * crypto_box_BOXZEROBYTES) + ciphertext  # legacy API zero-padding, same scheme as crypto_box_open()
+    plaintext = ffi.new("unsigned char[]", len(padded))
+
+    res = lib.crypto_box_open_afternm(plaintext, padded, len(padded), nonce, k)
+    ensure(
+        res == 0,
+        "An error occurred trying to decrypt the message",  # nonzero means MAC verification failed
+        raising=exc.CryptoError,
+    )
+
+    return ffi.buffer(plaintext, len(padded))[crypto_box_ZEROBYTES:]
+
+
+def crypto_box_easy(
+    message: bytes, nonce: bytes, pk: bytes, sk: bytes
+) -> bytes:
+    """
+    Encrypts and returns a message ``message`` using the secret key ``sk``,
+    public key ``pk``, and the nonce ``nonce``.
+
+    :param message: bytes
+    :param nonce: bytes
+    :param pk: bytes
+    :param sk: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce size")
+
+    if len(pk) != crypto_box_PUBLICKEYBYTES:
+        raise exc.ValueError("Invalid public key")
+
+    if len(sk) != crypto_box_SECRETKEYBYTES:
+        raise exc.ValueError("Invalid secret key")
+
+    _mlen = len(message)
+    _clen = crypto_box_MACBYTES + _mlen  # *_easy API prepends the MAC; no zero-padding dance needed
+
+    ciphertext = ffi.new("unsigned char[]", _clen)
+
+    rc = lib.crypto_box_easy(ciphertext, message, _mlen, nonce, pk, sk)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return ffi.buffer(ciphertext, _clen)[:]
+
+
+def crypto_box_open_easy(
+    ciphertext: bytes, nonce: bytes, pk: bytes, sk: bytes
+) -> bytes:
+    """
+    Decrypts and returns an encrypted message ``ciphertext``, using the secret
+    key ``sk``, public key ``pk``, and the nonce ``nonce``.
+
+    :param ciphertext: bytes
+    :param nonce: bytes
+    :param pk: bytes
+    :param sk: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce size")
+
+    if len(pk) != crypto_box_PUBLICKEYBYTES:
+        raise exc.ValueError("Invalid public key")
+
+    if len(sk) != crypto_box_SECRETKEYBYTES:
+        raise exc.ValueError("Invalid secret key")
+
+    _clen = len(ciphertext)
+
+    ensure(
+        _clen >= crypto_box_MACBYTES,  # anything shorter cannot even contain the MAC
+        "Input ciphertext must be at least {} long".format(
+            crypto_box_MACBYTES
+        ),
+        raising=exc.TypeError,
+    )
+
+    _mlen = _clen - crypto_box_MACBYTES
+
+    plaintext = ffi.new("unsigned char[]", max(1, _mlen))  # max(1, ...): zero-length ffi allocations are implementation-dependent
+
+    res = lib.crypto_box_open_easy(plaintext, ciphertext, _clen, nonce, pk, sk)
+    ensure(
+        res == 0,
+        "An error occurred trying to decrypt the message",  # nonzero means MAC verification failed
+        raising=exc.CryptoError,
+    )
+
+    return ffi.buffer(plaintext, _mlen)[:]
+
+
+def crypto_box_easy_afternm(message: bytes, nonce: bytes, k: bytes) -> bytes:
+    """
+    Encrypts and returns the message ``message`` using the shared key ``k`` and
+    the nonce ``nonce``.
+
+    :param message: bytes
+    :param nonce: bytes
+    :param k: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce")
+
+    if len(k) != crypto_box_BEFORENMBYTES:
+        raise exc.ValueError("Invalid shared key")
+
+    _mlen = len(message)
+    _clen = crypto_box_MACBYTES + _mlen  # ciphertext = MAC || encrypted message
+
+    ciphertext = ffi.new("unsigned char[]", _clen)
+
+    rc = lib.crypto_box_easy_afternm(ciphertext, message, _mlen, nonce, k)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return ffi.buffer(ciphertext, _clen)[:]
+
+
+def crypto_box_open_easy_afternm(
+    ciphertext: bytes, nonce: bytes, k: bytes
+) -> bytes:
+    """
+    Decrypts and returns the encrypted message ``ciphertext``, using the shared
+    key ``k`` and the nonce ``nonce``.
+
+    :param ciphertext: bytes
+    :param nonce: bytes
+    :param k: bytes
+    :rtype: bytes
+    """
+    if len(nonce) != crypto_box_NONCEBYTES:
+        raise exc.ValueError("Invalid nonce")
+
+    if len(k) != crypto_box_BEFORENMBYTES:
+        raise exc.ValueError("Invalid shared key")
+
+    _clen = len(ciphertext)
+
+    ensure(
+        _clen >= crypto_box_MACBYTES,  # anything shorter cannot even contain the MAC
+        "Input ciphertext must be at least {} long".format(
+            crypto_box_MACBYTES
+        ),
+        raising=exc.TypeError,
+    )
+
+    _mlen = _clen - crypto_box_MACBYTES
+
+    plaintext = ffi.new("unsigned char[]", max(1, _mlen))  # max(1, ...): zero-length ffi allocations are implementation-dependent
+
+    res = lib.crypto_box_open_easy_afternm(
+        plaintext, ciphertext, _clen, nonce, k
+    )
+    ensure(
+        res == 0,
+        "An error occurred trying to decrypt the message",  # nonzero means MAC verification failed
+        raising=exc.CryptoError,
+    )
+
+    return ffi.buffer(plaintext, _mlen)[:]
+
+
+def crypto_box_seal(message: bytes, pk: bytes) -> bytes:
+    """
+    Encrypts and returns a message ``message`` using an ephemeral secret key
+    and the public key ``pk``.
+    The ephemeral public key, which is embedded in the sealed box, is also
+    used, in combination with ``pk``, to derive the nonce needed for the
+    underlying box construct.
+
+    :param message: bytes
+    :param pk: bytes
+    :rtype: bytes
+
+    .. versionadded:: 1.2
+    """
+    ensure(
+        isinstance(message, bytes),
+        "input message must be bytes",
+        raising=TypeError,
+    )
+
+    ensure(
+        isinstance(pk, bytes), "public key must be bytes", raising=TypeError
+    )
+
+    if len(pk) != crypto_box_PUBLICKEYBYTES:
+        raise exc.ValueError("Invalid public key")
+
+    _mlen = len(message)
+    _clen = crypto_box_SEALBYTES + _mlen  # SEALBYTES overhead = ephemeral public key + MAC
+
+    ciphertext = ffi.new("unsigned char[]", _clen)
+
+    rc = lib.crypto_box_seal(ciphertext, message, _mlen, pk)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return ffi.buffer(ciphertext, _clen)[:]
+
+
+def crypto_box_seal_open(ciphertext: bytes, pk: bytes, sk: bytes) -> bytes:
+    """
+    Decrypts and returns an encrypted message ``ciphertext``, using the
+    recipient's secret key ``sk`` and the sender's ephemeral public key
+    embedded in the sealed box. The box construct nonce is derived from
+    the recipient's public key ``pk`` and the sender's public key.
+
+    :param ciphertext: bytes
+    :param pk: bytes
+    :param sk: bytes
+    :rtype: bytes
+
+    .. versionadded:: 1.2
+    """
+    ensure(
+        isinstance(ciphertext, bytes),
+        "input ciphertext must be bytes",
+        raising=TypeError,
+    )
+
+    ensure(
+        isinstance(pk, bytes), "public key must be bytes", raising=TypeError
+    )
+
+    ensure(
+        isinstance(sk, bytes), "secret key must be bytes", raising=TypeError
+    )
+
+    if len(pk) != crypto_box_PUBLICKEYBYTES:
+        raise exc.ValueError("Invalid public key")
+
+    if len(sk) != crypto_box_SECRETKEYBYTES:
+        raise exc.ValueError("Invalid secret key")
+
+    _clen = len(ciphertext)
+
+    ensure(
+        _clen >= crypto_box_SEALBYTES,  # must at least hold the ephemeral pk + MAC
+        ("Input ciphertext must be at least {} long").format(
+            crypto_box_SEALBYTES
+        ),
+        raising=exc.TypeError,
+    )
+
+    _mlen = _clen - crypto_box_SEALBYTES
+
+    # zero-length malloc results are implementation-dependent
+    plaintext = ffi.new("unsigned char[]", max(1, _mlen))
+
+    res = lib.crypto_box_seal_open(plaintext, ciphertext, _clen, pk, sk)
+    ensure(
+        res == 0,
+        "An error occurred trying to decrypt the message",  # nonzero means MAC verification failed
+        raising=exc.CryptoError,
+    )
+
+    return ffi.buffer(plaintext, _mlen)[:]
diff --git a/lib/nacl/bindings/crypto_core.py b/lib/nacl/bindings/crypto_core.py
new file mode 100644
index 0000000..e64a064
--- /dev/null
+++ b/lib/nacl/bindings/crypto_core.py
@@ -0,0 +1,449 @@
+# Copyright 2018 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+has_crypto_core_ed25519 = bool(lib.PYNACL_HAS_CRYPTO_CORE_ED25519)  # feature flag: False on a minimal libsodium build
+
+crypto_core_ed25519_BYTES = 0  # placeholders so the names always exist, even without the extension
+crypto_core_ed25519_SCALARBYTES = 0
+crypto_core_ed25519_NONREDUCEDSCALARBYTES = 0
+
+if has_crypto_core_ed25519:
+    crypto_core_ed25519_BYTES = lib.crypto_core_ed25519_bytes()
+    crypto_core_ed25519_SCALARBYTES = lib.crypto_core_ed25519_scalarbytes()
+    crypto_core_ed25519_NONREDUCEDSCALARBYTES = (
+        lib.crypto_core_ed25519_nonreducedscalarbytes()
+    )
+
+
+def crypto_core_ed25519_is_valid_point(p: bytes) -> bool:
+    """
+    Check if ``p`` represents a point on the edwards25519 curve, in canonical
+    form, on the main subgroup, and that the point doesn't have a small order.
+
+    :param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+              representing a point on the edwards25519 curve
+    :type p: bytes
+    :return: point validity
+    :rtype: bool
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(p, bytes) and len(p) == crypto_core_ed25519_BYTES,
+        "Point must be a crypto_core_ed25519_BYTES long bytes sequence",
+        raising=exc.TypeError,
+    )
+
+    rc = lib.crypto_core_ed25519_is_valid_point(p)
+    return rc == 1  # C API returns 1 for valid, 0 for invalid
+
+
+def crypto_core_ed25519_from_uniform(r: bytes) -> bytes:
+    """
+    Maps a 32 bytes vector ``r`` to a point. The point is guaranteed to be on the main subgroup.
+    This function directly exposes the Elligator 2 map, uses the high bit to set
+    the sign of the X coordinate, and the resulting point is multiplied by the cofactor.
+
+    :param r: a :py:data:`.crypto_core_ed25519_BYTES` long bytes
+              sequence representing arbitrary data
+    :type r: bytes
+    :return: a point on the edwards25519 curve main order subgroup, represented as a
+        :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(r, bytes) and len(r) == crypto_core_ed25519_BYTES,
+        "Integer r must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_BYTES"  # the constant's *name* (not its value) is interpolated; matches upstream PyNaCl
+        ),
+        raising=exc.TypeError,
+    )
+
+    p = ffi.new("unsigned char[]", crypto_core_ed25519_BYTES)
+
+    rc = lib.crypto_core_ed25519_from_uniform(p, r)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+    return ffi.buffer(p, crypto_core_ed25519_BYTES)[:]
+
+
+def crypto_core_ed25519_add(p: bytes, q: bytes) -> bytes:
+    """
+    Add two points on the edwards25519 curve.
+
+    :param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+              representing a point on the edwards25519 curve
+    :type p: bytes
+    :param q: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+              representing a point on the edwards25519 curve
+    :type q: bytes
+    :return: a point on the edwards25519 curve represented as
+        a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(p, bytes)
+        and isinstance(q, bytes)
+        and len(p) == crypto_core_ed25519_BYTES
+        and len(q) == crypto_core_ed25519_BYTES,
+        "Each point must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_BYTES"  # the constant's *name* is interpolated; matches upstream PyNaCl
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_BYTES)
+
+    rc = lib.crypto_core_ed25519_add(r, p, q)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)  # nonzero if p or q is not a valid point
+
+    return ffi.buffer(r, crypto_core_ed25519_BYTES)[:]
+
+
+def crypto_core_ed25519_sub(p: bytes, q: bytes) -> bytes:
+    """
+    Subtract a point from another on the edwards25519 curve.
+
+    :param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+              representing a point on the edwards25519 curve
+    :type p: bytes
+    :param q: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+              representing a point on the edwards25519 curve
+    :type q: bytes
+    :return: a point on the edwards25519 curve represented as
+        a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(p, bytes)
+        and isinstance(q, bytes)
+        and len(p) == crypto_core_ed25519_BYTES
+        and len(q) == crypto_core_ed25519_BYTES,
+        "Each point must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_BYTES"  # the constant's *name* is interpolated; matches upstream PyNaCl
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_BYTES)
+
+    rc = lib.crypto_core_ed25519_sub(r, p, q)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)  # nonzero if p or q is not a valid point
+
+    return ffi.buffer(r, crypto_core_ed25519_BYTES)[:]
+
+
+def crypto_core_ed25519_scalar_invert(s: bytes) -> bytes:
+    """
+    Return the multiplicative inverse of integer ``s`` modulo ``L``,
+    i.e an integer ``i`` such that ``s * i = 1 (mod L)``, where ``L``
+    is the order of the main subgroup.
+
+    Raises a ``exc.RuntimeError`` if ``s`` is the integer zero.
+
+    :param s: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type s: bytes
+    :return: an integer represented as a
+        :py:data:`.crypto_core_ed25519_SCALARBYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(s, bytes) and len(s) == crypto_core_ed25519_SCALARBYTES,
+        "Integer s must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_SCALARBYTES"  # the constant's *name* is interpolated; matches upstream PyNaCl
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_SCALARBYTES)
+
+    rc = lib.crypto_core_ed25519_scalar_invert(r, s)
+    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)  # nonzero only when s == 0 (zero has no inverse)
+
+    return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]
+
+
+def crypto_core_ed25519_scalar_negate(s: bytes) -> bytes:
+    """
+    Return the integer ``n`` such that ``s + n = 0 (mod L)``, where ``L``
+    is the order of the main subgroup.
+
+    :param s: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type s: bytes
+    :return: an integer represented as a
+        :py:data:`.crypto_core_ed25519_SCALARBYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(s, bytes) and len(s) == crypto_core_ed25519_SCALARBYTES,
+        "Integer s must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_SCALARBYTES"
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_SCALARBYTES)
+
+    lib.crypto_core_ed25519_scalar_negate(r, s)  # return code ignored, matching upstream -- presumably cannot fail for sized inputs; confirm
+
+    return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]
+
+
+def crypto_core_ed25519_scalar_complement(s: bytes) -> bytes:
+    """
+    Return the complement of integer ``s`` modulo ``L``, i.e. an integer
+    ``c`` such that ``s + c = 1 (mod L)``, where ``L`` is the order of
+    the main subgroup.
+
+    :param s: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type s: bytes
+    :return: an integer represented as a
+        :py:data:`.crypto_core_ed25519_SCALARBYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(s, bytes) and len(s) == crypto_core_ed25519_SCALARBYTES,
+        "Integer s must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_SCALARBYTES"
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_SCALARBYTES)
+
+    lib.crypto_core_ed25519_scalar_complement(r, s)  # return code ignored, matching upstream
+
+    return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]
+
+
+def crypto_core_ed25519_scalar_add(p: bytes, q: bytes) -> bytes:
+    """
+    Add integers ``p`` and ``q`` modulo ``L``, where ``L`` is the order of
+    the main subgroup.
+
+    :param p: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type p: bytes
+    :param q: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type q: bytes
+    :return: an integer represented as a
+        :py:data:`.crypto_core_ed25519_SCALARBYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(p, bytes)
+        and isinstance(q, bytes)
+        and len(p) == crypto_core_ed25519_SCALARBYTES
+        and len(q) == crypto_core_ed25519_SCALARBYTES,
+        "Each integer must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_SCALARBYTES"
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_SCALARBYTES)
+
+    lib.crypto_core_ed25519_scalar_add(r, p, q)  # return code ignored, matching upstream
+
+    return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]
+
+
+def crypto_core_ed25519_scalar_sub(p: bytes, q: bytes) -> bytes:
+    """
+    Subtract integers ``p`` and ``q`` modulo ``L``, where ``L`` is the
+    order of the main subgroup.
+
+    :param p: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type p: bytes
+    :param q: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type q: bytes
+    :return: an integer represented as a
+        :py:data:`.crypto_core_ed25519_SCALARBYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(p, bytes)
+        and isinstance(q, bytes)
+        and len(p) == crypto_core_ed25519_SCALARBYTES
+        and len(q) == crypto_core_ed25519_SCALARBYTES,
+        "Each integer must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_SCALARBYTES"
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_SCALARBYTES)
+
+    lib.crypto_core_ed25519_scalar_sub(r, p, q)  # computes p - q mod L; return code ignored, matching upstream
+
+    return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]
+
+
+def crypto_core_ed25519_scalar_mul(p: bytes, q: bytes) -> bytes:
+    """
+    Multiply integers ``p`` and ``q`` modulo ``L``, where ``L`` is the
+    order of the main subgroup.
+
+    :param p: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type p: bytes
+    :param q: a :py:data:`.crypto_core_ed25519_SCALARBYTES`
+              long bytes sequence representing an integer
+    :type q: bytes
+    :return: an integer represented as a
+        :py:data:`.crypto_core_ed25519_SCALARBYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(p, bytes)
+        and isinstance(q, bytes)
+        and len(p) == crypto_core_ed25519_SCALARBYTES
+        and len(q) == crypto_core_ed25519_SCALARBYTES,
+        "Each integer must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_SCALARBYTES"
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_SCALARBYTES)
+
+    lib.crypto_core_ed25519_scalar_mul(r, p, q)  # return code ignored, matching upstream
+
+    return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]
+
+
+def crypto_core_ed25519_scalar_reduce(s: bytes) -> bytes:
+    """
+    Reduce integer ``s`` to ``s`` modulo ``L``, where ``L`` is the order
+    of the main subgroup.
+
+    :param s: a :py:data:`.crypto_core_ed25519_NONREDUCEDSCALARBYTES`
+              long bytes sequence representing an integer
+    :type s: bytes
+    :return: an integer represented as a
+        :py:data:`.crypto_core_ed25519_SCALARBYTES` long bytes sequence
+    :rtype: bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+    """
+    ensure(
+        has_crypto_core_ed25519,
+        "Not available in minimal build",
+        raising=exc.UnavailableError,
+    )
+
+    ensure(
+        isinstance(s, bytes)
+        and len(s) == crypto_core_ed25519_NONREDUCEDSCALARBYTES,  # input is the wider (64-byte) non-reduced form
+        "Integer s must be a {} long bytes sequence".format(
+            "crypto_core_ed25519_NONREDUCEDSCALARBYTES"
+        ),
+        raising=exc.TypeError,
+    )
+
+    r = ffi.new("unsigned char[]", crypto_core_ed25519_SCALARBYTES)  # output is the narrower reduced form
+
+    lib.crypto_core_ed25519_scalar_reduce(r, s)  # return code ignored, matching upstream
+
+    return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]
diff --git a/lib/nacl/bindings/crypto_generichash.py b/lib/nacl/bindings/crypto_generichash.py
new file mode 100644
index 0000000..6ab385a
--- /dev/null
+++ b/lib/nacl/bindings/crypto_generichash.py
@@ -0,0 +1,281 @@
+# Copyright 2013-2019 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import NoReturn, TypeVar
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+crypto_generichash_BYTES: int = lib.crypto_generichash_blake2b_bytes()  # generichash is BLAKE2b; sizes queried at import time
+crypto_generichash_BYTES_MIN: int = lib.crypto_generichash_blake2b_bytes_min()
+crypto_generichash_BYTES_MAX: int = lib.crypto_generichash_blake2b_bytes_max()
+crypto_generichash_KEYBYTES: int = lib.crypto_generichash_blake2b_keybytes()
+crypto_generichash_KEYBYTES_MIN: int = (
+    lib.crypto_generichash_blake2b_keybytes_min()
+)
+crypto_generichash_KEYBYTES_MAX: int = (
+    lib.crypto_generichash_blake2b_keybytes_max()
+)
+crypto_generichash_SALTBYTES: int = lib.crypto_generichash_blake2b_saltbytes()
+crypto_generichash_PERSONALBYTES: int = (
+    lib.crypto_generichash_blake2b_personalbytes()
+)
+crypto_generichash_STATEBYTES: int = lib.crypto_generichash_statebytes()  # size of the opaque streaming-hash state
+
+_OVERLONG = "{0} length greater than {1} bytes"  # shared error-message templates for _checkparams
+_TOOBIG = "{0} greater than {1}"
+
+
+def _checkparams(
+    digest_size: int, key: bytes, salt: bytes, person: bytes
+) -> None:
+    """Check hash parameters"""
+    ensure(
+        isinstance(key, bytes),
+        "Key must be a bytes sequence",
+        raising=exc.TypeError,
+    )
+
+    ensure(
+        isinstance(salt, bytes),
+        "Salt must be a bytes sequence",
+        raising=exc.TypeError,
+    )
+
+    ensure(
+        isinstance(person, bytes),
+        "Person must be a bytes sequence",
+        raising=exc.TypeError,
+    )
+
+    ensure(
+        isinstance(digest_size, int),
+        "Digest size must be an integer number",
+        raising=exc.TypeError,
+    )
+
+    ensure(
+        digest_size <= crypto_generichash_BYTES_MAX,  # NOTE(review): no lower-bound check against BYTES_MIN -- matches upstream; confirm
+        _TOOBIG.format("Digest_size", crypto_generichash_BYTES_MAX),
+        raising=exc.ValueError,
+    )
+
+    ensure(
+        len(key) <= crypto_generichash_KEYBYTES_MAX,
+        _OVERLONG.format("Key", crypto_generichash_KEYBYTES_MAX),
+        raising=exc.ValueError,
+    )
+
+    ensure(
+        len(salt) <= crypto_generichash_SALTBYTES,  # shorter salts are allowed; callers zero-pad to full length
+        _OVERLONG.format("Salt", crypto_generichash_SALTBYTES),
+        raising=exc.ValueError,
+    )
+
+    ensure(
+        len(person) <= crypto_generichash_PERSONALBYTES,
+        _OVERLONG.format("Person", crypto_generichash_PERSONALBYTES),
+        raising=exc.ValueError,
+    )
+
+
+def generichash_blake2b_salt_personal(
+    data: bytes,
+    digest_size: int = crypto_generichash_BYTES,
+    key: bytes = b"",
+    salt: bytes = b"",
+    person: bytes = b"",
+) -> bytes:
+    """One shot hash interface
+
+    :param data: the input data to the hash function
+    :type data: bytes
+    :param digest_size: must be at most
+                        :py:data:`.crypto_generichash_BYTES_MAX`;
+                        the default digest size is
+                        :py:data:`.crypto_generichash_BYTES`
+    :type digest_size: int
+    :param key: must be at most
+                :py:data:`.crypto_generichash_KEYBYTES_MAX` long
+    :type key: bytes
+    :param salt: must be at most
+                 :py:data:`.crypto_generichash_SALTBYTES` long;
+                 will be zero-padded if needed
+    :type salt: bytes
+    :param person: must be at most
+                   :py:data:`.crypto_generichash_PERSONALBYTES` long:
+                   will be zero-padded if needed
+    :type person: bytes
+    :return: digest_size long digest
+    :rtype: bytes
+    """
+
+    _checkparams(digest_size, key, salt, person)
+
+    ensure(
+        isinstance(data, bytes),
+        "Input data must be a bytes sequence",
+        raising=exc.TypeError,
+    )
+
+    digest = ffi.new("unsigned char[]", digest_size)
+
+    # both _salt and _personal must be zero-padded to the correct length
+    _salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)  # ffi.new zero-initializes, so the tail padding is already zeros
+    _person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
+
+    ffi.memmove(_salt, salt, len(salt))
+    ffi.memmove(_person, person, len(person))
+
+    rc = lib.crypto_generichash_blake2b_salt_personal(
+        digest, digest_size, data, len(data), key, len(key), _salt, _person
+    )
+    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+
+    return ffi.buffer(digest, digest_size)[:]
+
+
+_Blake2State = TypeVar("_Blake2State", bound="Blake2State")  # lets Blake2State.copy() return the subclass type
+
+
+class Blake2State:
+    """
+    Python-level wrapper for the crypto_generichash_blake2b state buffer
+    """
+
+    __slots__ = ["_statebuf", "digest_size"]  # no per-instance __dict__; fixed attribute set
+
+    def __init__(self, digest_size: int):
+        self._statebuf = ffi.new(
+            "unsigned char[]", crypto_generichash_STATEBYTES  # opaque C state, passed to the lib.crypto_generichash_blake2b_* calls
+        )
+        self.digest_size = digest_size
+
+    def __reduce__(self) -> NoReturn:
+        """
+        Raise the same exception as hashlib's blake implementation
+        on copy.copy()
+        """
+        raise TypeError(
+            "can't pickle {} objects".format(self.__class__.__name__)
+        )
+
+    def copy(self: _Blake2State) -> _Blake2State:
+        # Deep-copy the C state buffer so the clone hashes independently.
+        _st = self.__class__(self.digest_size)
+        ffi.memmove(
+            _st._statebuf, self._statebuf, crypto_generichash_STATEBYTES
+        )
+        return _st
+
+
+def generichash_blake2b_init(
+    key: bytes = b"",
+    salt: bytes = b"",
+    person: bytes = b"",
+    digest_size: int = crypto_generichash_BYTES,
+) -> Blake2State:
+    """
+    Create a new initialized blake2b hash state
+
+    :param key: must be at most
+                :py:data:`.crypto_generichash_KEYBYTES_MAX` long
+    :type key: bytes
+    :param salt: must be at most
+                 :py:data:`.crypto_generichash_SALTBYTES` long;
+                 will be zero-padded if needed
+    :type salt: bytes
+    :param person: must be at most
+                   :py:data:`.crypto_generichash_PERSONALBYTES` long:
+                   will be zero-padded if needed
+    :type person: bytes
+    :param digest_size: must be at most
+                        :py:data:`.crypto_generichash_BYTES_MAX`;
+                        the default digest size is
+                        :py:data:`.crypto_generichash_BYTES`
+    :type digest_size: int
+    :return: a initialized :py:class:`.Blake2State`
+    :rtype: object
+    """
+
+    _checkparams(digest_size, key, salt, person)
+
+    state = Blake2State(digest_size)
+
+    # both _salt and _personal must be zero-padded to the correct length
+    _salt = ffi.new("unsigned char []", crypto_generichash_SALTBYTES)  # ffi.new zero-initializes, so the tail padding is already zeros
+    _person = ffi.new("unsigned char []", crypto_generichash_PERSONALBYTES)
+
+    ffi.memmove(_salt, salt, len(salt))
+    ffi.memmove(_person, person, len(person))
+
+    rc = lib.crypto_generichash_blake2b_init_salt_personal(
+        state._statebuf, key, len(key), digest_size, _salt, _person
+    )
+    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+
+    return state
+
+
+def generichash_blake2b_update(state: Blake2State, data: bytes) -> None:
+    """Update the blake2b hash state
+
+    :param state: a initialized Blake2bState object as returned from
+                  :py:func:`.crypto_generichash_blake2b_init`
+    :type state: :py:class:`.Blake2State`
+    :param data:
+    :type data: bytes
+    """
+
+    ensure(
+        isinstance(state, Blake2State),
+        "State must be a Blake2State object",
+        raising=exc.TypeError,
+    )
+
+    ensure(
+        isinstance(data, bytes),
+        "Input data must be a bytes sequence",
+        raising=exc.TypeError,
+    )
+
+    rc = lib.crypto_generichash_blake2b_update(
+        state._statebuf, data, len(data)  # mutates the state buffer in place; nothing is returned
+    )
+    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+
+
+def generichash_blake2b_final(state: Blake2State) -> bytes:
+    """Finalize the blake2b hash state and return the digest.
+
+    :param state: a initialized Blake2bState object as returned from
+                  :py:func:`.crypto_generichash_blake2b_init`
+    :type state: :py:class:`.Blake2State`
+    :return: the blake2 digest of the passed-in data stream
+    :rtype: bytes
+    """
+
+    ensure(
+        isinstance(state, Blake2State),
+        "State must be a Blake2State object",
+        raising=exc.TypeError,
+    )
+
+    _digest = ffi.new("unsigned char[]", crypto_generichash_BYTES_MAX)  # allocate the max; only digest_size bytes are returned
+    rc = lib.crypto_generichash_blake2b_final(
+        state._statebuf, _digest, state.digest_size
+    )
+
+    ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+    return ffi.buffer(_digest, state.digest_size)[:]
diff --git a/lib/nacl/bindings/crypto_hash.py b/lib/nacl/bindings/crypto_hash.py
new file mode 100644
index 0000000..2bab399
--- /dev/null
+++ b/lib/nacl/bindings/crypto_hash.py
@@ -0,0 +1,63 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
# crypto_hash_BYTES = lib.crypto_hash_bytes()
# Digest lengths (bytes) queried from libsodium. crypto_hash defaults to
# SHA-512, so crypto_hash_BYTES mirrors the SHA-512 digest length.
crypto_hash_BYTES: int = lib.crypto_hash_sha512_bytes()
crypto_hash_sha256_BYTES: int = lib.crypto_hash_sha256_bytes()
crypto_hash_sha512_BYTES: int = lib.crypto_hash_sha512_bytes()
+
+
def crypto_hash(message: bytes) -> bytes:
    """
    Hash ``message`` with libsodium's default crypto_hash primitive
    (SHA-512, per crypto_hash_BYTES above) and return the raw digest.

    :param message: bytes
    :rtype: bytes
    """
    out = ffi.new("unsigned char[]", crypto_hash_BYTES)
    status = lib.crypto_hash(out, message, len(message))
    ensure(status == 0, "Unexpected library error", raising=exc.RuntimeError)
    return ffi.buffer(out, crypto_hash_BYTES)[:]
+
+
def crypto_hash_sha256(message: bytes) -> bytes:
    """
    Hash ``message`` with SHA-256 and return the raw digest.

    :param message: bytes
    :rtype: bytes
    """
    out = ffi.new("unsigned char[]", crypto_hash_sha256_BYTES)
    status = lib.crypto_hash_sha256(out, message, len(message))
    ensure(status == 0, "Unexpected library error", raising=exc.RuntimeError)
    return ffi.buffer(out, crypto_hash_sha256_BYTES)[:]
+
+
def crypto_hash_sha512(message: bytes) -> bytes:
    """
    Hash ``message`` with SHA-512 and return the raw digest.

    :param message: bytes
    :rtype: bytes
    """
    out = ffi.new("unsigned char[]", crypto_hash_sha512_BYTES)
    status = lib.crypto_hash_sha512(out, message, len(message))
    ensure(status == 0, "Unexpected library error", raising=exc.RuntimeError)
    return ffi.buffer(out, crypto_hash_sha512_BYTES)[:]
diff --git a/lib/nacl/bindings/crypto_kx.py b/lib/nacl/bindings/crypto_kx.py
new file mode 100644
index 0000000..3c649e4
--- /dev/null
+++ b/lib/nacl/bindings/crypto_kx.py
@@ -0,0 +1,200 @@
+# Copyright 2018 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
# Implementations of client, server key exchange.
# (This text previously sat as a stray string literal after __all__,
# where it is not a module docstring and had no effect.)
__all__ = [
    "crypto_kx_keypair",
    # crypto_kx_seed_keypair is defined in this module but was missing
    # from the public API list.
    "crypto_kx_seed_keypair",
    "crypto_kx_client_session_keys",
    "crypto_kx_server_session_keys",
    "crypto_kx_PUBLIC_KEY_BYTES",
    "crypto_kx_SECRET_KEY_BYTES",
    "crypto_kx_SEED_BYTES",
    "crypto_kx_SESSION_KEY_BYTES",
]

# Sizes (bytes) of the crypto_kx primitives, queried from libsodium.
crypto_kx_PUBLIC_KEY_BYTES: int = lib.crypto_kx_publickeybytes()
crypto_kx_SECRET_KEY_BYTES: int = lib.crypto_kx_secretkeybytes()
crypto_kx_SEED_BYTES: int = lib.crypto_kx_seedbytes()
crypto_kx_SESSION_KEY_BYTES: int = lib.crypto_kx_sessionkeybytes()
+
+
def crypto_kx_keypair() -> Tuple[bytes, bytes]:
    """
    Generate a fresh (public, secret) key pair for key exchange.
    This is a duplicate crypto_box_keypair, but
    is included for api consistency.

    :return: (public_key, secret_key)
    :rtype: (bytes, bytes)
    """
    pk_buf = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
    sk_buf = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)
    status = lib.crypto_kx_keypair(pk_buf, sk_buf)
    ensure(status == 0, "Key generation failed.", raising=exc.CryptoError)

    public_key = ffi.buffer(pk_buf, crypto_kx_PUBLIC_KEY_BYTES)[:]
    secret_key = ffi.buffer(sk_buf, crypto_kx_SECRET_KEY_BYTES)[:]
    return public_key, secret_key
+
+
def crypto_kx_seed_keypair(seed: bytes) -> Tuple[bytes, bytes]:
    """
    Generate a key pair with a given seed.
    This is functionally the same as crypto_box_seed_keypair, however
    it uses the blake2b hash primitive instead of sha512.
    It is included mainly for api consistency when using crypto_kx.

    :param seed: random seed
    :type seed: bytes
    :return: (public_key, secret_key)
    :rtype: (bytes, bytes)
    :raises nacl.exceptions.TypeError: if ``seed`` is not a bytes sequence
        of exactly :py:data:`.crypto_kx_SEED_BYTES` bytes
    """
    # Validate the seed *before* allocating FFI buffers, so a bad
    # argument never reaches the C call and we skip two useless
    # allocations (previously the buffers were allocated first).
    ensure(
        isinstance(seed, bytes) and len(seed) == crypto_kx_SEED_BYTES,
        "Seed must be a {} byte long bytes sequence".format(
            crypto_kx_SEED_BYTES
        ),
        raising=exc.TypeError,
    )

    public_key = ffi.new("unsigned char[]", crypto_kx_PUBLIC_KEY_BYTES)
    secret_key = ffi.new("unsigned char[]", crypto_kx_SECRET_KEY_BYTES)
    res = lib.crypto_kx_seed_keypair(public_key, secret_key, seed)
    ensure(res == 0, "Key generation failed.", raising=exc.CryptoError)

    return (
        ffi.buffer(public_key, crypto_kx_PUBLIC_KEY_BYTES)[:],
        ffi.buffer(secret_key, crypto_kx_SECRET_KEY_BYTES)[:],
    )
+
+
def crypto_kx_client_session_keys(
    client_public_key: bytes,
    client_secret_key: bytes,
    server_public_key: bytes,
) -> Tuple[bytes, bytes]:
    """
    Generate session keys for the client.

    :param client_public_key:
    :type client_public_key: bytes
    :param client_secret_key:
    :type client_secret_key: bytes
    :param server_public_key:
    :type server_public_key: bytes
    :return: (rx_key, tx_key)
    :rtype: (bytes, bytes)
    """
    ensure(
        isinstance(client_public_key, bytes)
        and len(client_public_key) == crypto_kx_PUBLIC_KEY_BYTES,
        "Client public key must be a {} bytes long bytes sequence".format(
            crypto_kx_PUBLIC_KEY_BYTES
        ),
        raising=exc.TypeError,
    )
    ensure(
        isinstance(client_secret_key, bytes)
        and len(client_secret_key) == crypto_kx_SECRET_KEY_BYTES,
        # Bug fix: this message previously formatted the *public* key
        # size constant; the check itself is against the secret key size.
        "Client secret key must be a {} bytes long bytes sequence".format(
            crypto_kx_SECRET_KEY_BYTES
        ),
        raising=exc.TypeError,
    )
    ensure(
        isinstance(server_public_key, bytes)
        and len(server_public_key) == crypto_kx_PUBLIC_KEY_BYTES,
        "Server public key must be a {} bytes long bytes sequence".format(
            crypto_kx_PUBLIC_KEY_BYTES
        ),
        raising=exc.TypeError,
    )

    rx_key = ffi.new("unsigned char[]", crypto_kx_SESSION_KEY_BYTES)
    tx_key = ffi.new("unsigned char[]", crypto_kx_SESSION_KEY_BYTES)
    res = lib.crypto_kx_client_session_keys(
        rx_key, tx_key, client_public_key, client_secret_key, server_public_key
    )
    ensure(
        res == 0,
        "Client session key generation failed.",
        raising=exc.CryptoError,
    )

    return (
        ffi.buffer(rx_key, crypto_kx_SESSION_KEY_BYTES)[:],
        ffi.buffer(tx_key, crypto_kx_SESSION_KEY_BYTES)[:],
    )
+
+
def crypto_kx_server_session_keys(
    server_public_key: bytes,
    server_secret_key: bytes,
    client_public_key: bytes,
) -> Tuple[bytes, bytes]:
    """
    Generate session keys for the server.

    :param server_public_key:
    :type server_public_key: bytes
    :param server_secret_key:
    :type server_secret_key: bytes
    :param client_public_key:
    :type client_public_key: bytes
    :return: (rx_key, tx_key)
    :rtype: (bytes, bytes)
    """
    ensure(
        isinstance(server_public_key, bytes)
        and len(server_public_key) == crypto_kx_PUBLIC_KEY_BYTES,
        "Server public key must be a {} bytes long bytes sequence".format(
            crypto_kx_PUBLIC_KEY_BYTES
        ),
        raising=exc.TypeError,
    )
    ensure(
        isinstance(server_secret_key, bytes)
        and len(server_secret_key) == crypto_kx_SECRET_KEY_BYTES,
        # Bug fix: this message previously formatted the *public* key
        # size constant; the check itself is against the secret key size.
        "Server secret key must be a {} bytes long bytes sequence".format(
            crypto_kx_SECRET_KEY_BYTES
        ),
        raising=exc.TypeError,
    )
    ensure(
        isinstance(client_public_key, bytes)
        and len(client_public_key) == crypto_kx_PUBLIC_KEY_BYTES,
        "Client public key must be a {} bytes long bytes sequence".format(
            crypto_kx_PUBLIC_KEY_BYTES
        ),
        raising=exc.TypeError,
    )

    rx_key = ffi.new("unsigned char[]", crypto_kx_SESSION_KEY_BYTES)
    tx_key = ffi.new("unsigned char[]", crypto_kx_SESSION_KEY_BYTES)
    res = lib.crypto_kx_server_session_keys(
        rx_key, tx_key, server_public_key, server_secret_key, client_public_key
    )
    ensure(
        res == 0,
        "Server session key generation failed.",
        raising=exc.CryptoError,
    )

    return (
        ffi.buffer(rx_key, crypto_kx_SESSION_KEY_BYTES)[:],
        ffi.buffer(tx_key, crypto_kx_SESSION_KEY_BYTES)[:],
    )
diff --git a/lib/nacl/bindings/crypto_pwhash.py b/lib/nacl/bindings/crypto_pwhash.py
new file mode 100644
index 0000000..7f62360
--- /dev/null
+++ b/lib/nacl/bindings/crypto_pwhash.py
@@ -0,0 +1,599 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from typing import Tuple
+
+import nacl.exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
# Minimal builds of libsodium omit the scrypt construction entirely
# (see the UnavailableError raises below); probe once at import time.
has_crypto_pwhash_scryptsalsa208sha256 = bool(
    lib.PYNACL_HAS_CRYPTO_PWHASH_SCRYPTSALSA208SHA256
)

# Placeholder values that keep the module importable under a minimal
# build; overwritten from libsodium below when scrypt is available.
crypto_pwhash_scryptsalsa208sha256_STRPREFIX = b""
crypto_pwhash_scryptsalsa208sha256_SALTBYTES = 0
crypto_pwhash_scryptsalsa208sha256_STRBYTES = 0
crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN = 0
crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX = 0
crypto_pwhash_scryptsalsa208sha256_BYTES_MIN = 0
crypto_pwhash_scryptsalsa208sha256_BYTES_MAX = 0
crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MIN = 0
crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MAX = 0
crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MIN = 0
crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MAX = 0
crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_INTERACTIVE = 0
crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_INTERACTIVE = 0
crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_SENSITIVE = 0
crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_SENSITIVE = 0

if has_crypto_pwhash_scryptsalsa208sha256:
    crypto_pwhash_scryptsalsa208sha256_STRPREFIX = ffi.string(
        ffi.cast("char *", lib.crypto_pwhash_scryptsalsa208sha256_strprefix())
    )[:]
    crypto_pwhash_scryptsalsa208sha256_SALTBYTES = (
        lib.crypto_pwhash_scryptsalsa208sha256_saltbytes()
    )
    crypto_pwhash_scryptsalsa208sha256_STRBYTES = (
        lib.crypto_pwhash_scryptsalsa208sha256_strbytes()
    )
    crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN = (
        lib.crypto_pwhash_scryptsalsa208sha256_passwd_min()
    )
    crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX = (
        lib.crypto_pwhash_scryptsalsa208sha256_passwd_max()
    )
    crypto_pwhash_scryptsalsa208sha256_BYTES_MIN = (
        lib.crypto_pwhash_scryptsalsa208sha256_bytes_min()
    )
    crypto_pwhash_scryptsalsa208sha256_BYTES_MAX = (
        lib.crypto_pwhash_scryptsalsa208sha256_bytes_max()
    )
    crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MIN = (
        lib.crypto_pwhash_scryptsalsa208sha256_memlimit_min()
    )
    crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MAX = (
        lib.crypto_pwhash_scryptsalsa208sha256_memlimit_max()
    )
    crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MIN = (
        lib.crypto_pwhash_scryptsalsa208sha256_opslimit_min()
    )
    crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MAX = (
        lib.crypto_pwhash_scryptsalsa208sha256_opslimit_max()
    )
    crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_INTERACTIVE = (
        lib.crypto_pwhash_scryptsalsa208sha256_opslimit_interactive()
    )
    crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_INTERACTIVE = (
        lib.crypto_pwhash_scryptsalsa208sha256_memlimit_interactive()
    )
    crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_SENSITIVE = (
        lib.crypto_pwhash_scryptsalsa208sha256_opslimit_sensitive()
    )
    crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_SENSITIVE = (
        lib.crypto_pwhash_scryptsalsa208sha256_memlimit_sensitive()
    )
+
# Algorithm identifiers accepted by crypto_pwhash_alg and
# crypto_pwhash_str_alg below, queried from libsodium.
crypto_pwhash_ALG_ARGON2I13: int = lib.crypto_pwhash_alg_argon2i13()
crypto_pwhash_ALG_ARGON2ID13: int = lib.crypto_pwhash_alg_argon2id13()
crypto_pwhash_ALG_DEFAULT: int = lib.crypto_pwhash_alg_default()

crypto_pwhash_SALTBYTES: int = lib.crypto_pwhash_saltbytes()
crypto_pwhash_STRBYTES: int = lib.crypto_pwhash_strbytes()

crypto_pwhash_PASSWD_MIN: int = lib.crypto_pwhash_passwd_min()
crypto_pwhash_PASSWD_MAX: int = lib.crypto_pwhash_passwd_max()
crypto_pwhash_BYTES_MIN: int = lib.crypto_pwhash_bytes_min()
crypto_pwhash_BYTES_MAX: int = lib.crypto_pwhash_bytes_max()

# argon2i parameter bounds and presets.
crypto_pwhash_argon2i_STRPREFIX: bytes = ffi.string(
    ffi.cast("char *", lib.crypto_pwhash_argon2i_strprefix())
)[:]
crypto_pwhash_argon2i_MEMLIMIT_MIN: int = (
    lib.crypto_pwhash_argon2i_memlimit_min()
)
crypto_pwhash_argon2i_MEMLIMIT_MAX: int = (
    lib.crypto_pwhash_argon2i_memlimit_max()
)
crypto_pwhash_argon2i_OPSLIMIT_MIN: int = (
    lib.crypto_pwhash_argon2i_opslimit_min()
)
crypto_pwhash_argon2i_OPSLIMIT_MAX: int = (
    lib.crypto_pwhash_argon2i_opslimit_max()
)
crypto_pwhash_argon2i_OPSLIMIT_INTERACTIVE: int = (
    lib.crypto_pwhash_argon2i_opslimit_interactive()
)
crypto_pwhash_argon2i_MEMLIMIT_INTERACTIVE: int = (
    lib.crypto_pwhash_argon2i_memlimit_interactive()
)
crypto_pwhash_argon2i_OPSLIMIT_MODERATE: int = (
    lib.crypto_pwhash_argon2i_opslimit_moderate()
)
crypto_pwhash_argon2i_MEMLIMIT_MODERATE: int = (
    lib.crypto_pwhash_argon2i_memlimit_moderate()
)
crypto_pwhash_argon2i_OPSLIMIT_SENSITIVE: int = (
    lib.crypto_pwhash_argon2i_opslimit_sensitive()
)
crypto_pwhash_argon2i_MEMLIMIT_SENSITIVE: int = (
    lib.crypto_pwhash_argon2i_memlimit_sensitive()
)

# argon2id parameter bounds and presets.
crypto_pwhash_argon2id_STRPREFIX: bytes = ffi.string(
    ffi.cast("char *", lib.crypto_pwhash_argon2id_strprefix())
)[:]
crypto_pwhash_argon2id_MEMLIMIT_MIN: int = (
    lib.crypto_pwhash_argon2id_memlimit_min()
)
crypto_pwhash_argon2id_MEMLIMIT_MAX: int = (
    lib.crypto_pwhash_argon2id_memlimit_max()
)
crypto_pwhash_argon2id_OPSLIMIT_MIN: int = (
    lib.crypto_pwhash_argon2id_opslimit_min()
)
crypto_pwhash_argon2id_OPSLIMIT_MAX: int = (
    lib.crypto_pwhash_argon2id_opslimit_max()
)
crypto_pwhash_argon2id_OPSLIMIT_INTERACTIVE: int = (
    lib.crypto_pwhash_argon2id_opslimit_interactive()
)
crypto_pwhash_argon2id_MEMLIMIT_INTERACTIVE: int = (
    lib.crypto_pwhash_argon2id_memlimit_interactive()
)
crypto_pwhash_argon2id_OPSLIMIT_MODERATE: int = (
    lib.crypto_pwhash_argon2id_opslimit_moderate()
)
crypto_pwhash_argon2id_MEMLIMIT_MODERATE: int = (
    lib.crypto_pwhash_argon2id_memlimit_moderate()
)
crypto_pwhash_argon2id_OPSLIMIT_SENSITIVE: int = (
    lib.crypto_pwhash_argon2id_opslimit_sensitive()
)
crypto_pwhash_argon2id_MEMLIMIT_SENSITIVE: int = (
    lib.crypto_pwhash_argon2id_memlimit_sensitive()
)

# Short aliases for the scrypt presets, used as keyword defaults below.
SCRYPT_OPSLIMIT_INTERACTIVE = (
    crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_INTERACTIVE
)
SCRYPT_MEMLIMIT_INTERACTIVE = (
    crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_INTERACTIVE
)
SCRYPT_OPSLIMIT_SENSITIVE = (
    crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_SENSITIVE
)
SCRYPT_MEMLIMIT_SENSITIVE = (
    crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_SENSITIVE
)
SCRYPT_SALTBYTES = crypto_pwhash_scryptsalsa208sha256_SALTBYTES
SCRYPT_STRBYTES = crypto_pwhash_scryptsalsa208sha256_STRBYTES

# Numeric bounds used by _check_memory_occupation below.
SCRYPT_PR_MAX = (1 << 30) - 1
LOG2_UINT64_MAX = 63
UINT64_MAX = (1 << 64) - 1
SCRYPT_MAX_MEM = 32 * (1024 * 1024)
+
+
def _check_memory_occupation(
    n: int, r: int, p: int, maxmem: int = SCRYPT_MAX_MEM
) -> None:
    """
    Validate the scrypt parameters ``n`` (cost factor), ``r`` (block
    size) and ``p`` (parallelism) against each other and against the
    ``maxmem`` memory budget, raising ``exc.ValueError`` otherwise.

    Bug fix: the bound checks now use integer floor division (``//``)
    instead of true division. The previous ``/`` produced floats, and at
    64-bit magnitudes (e.g. ``UINT64_MAX / 128``) float rounding could
    accept a value one past the exact integer bound.
    """
    ensure(r != 0, "Invalid block size", raising=exc.ValueError)

    ensure(p != 0, "Invalid parallelization factor", raising=exc.ValueError)

    ensure(
        (n & (n - 1)) == 0,
        "Cost factor must be a power of 2",
        raising=exc.ValueError,
    )

    ensure(n > 1, "Cost factor must be at least 2", raising=exc.ValueError)

    ensure(
        p <= SCRYPT_PR_MAX // r,
        "p*r is greater than {}".format(SCRYPT_PR_MAX),
        raising=exc.ValueError,
    )

    ensure(n < (1 << (16 * r)), raising=exc.ValueError)

    # Memory for the p parallel B blocks (128 * r bytes each) ...
    Blen = p * 128 * r

    i = UINT64_MAX // 128

    ensure(n + 2 <= i // r, raising=exc.ValueError)

    # ... plus the V scratch area (128 * r * (n + 2) bytes, factored).
    Vlen = 32 * r * (n + 2) * 4

    ensure(Blen <= UINT64_MAX - Vlen, raising=exc.ValueError)

    ensure(Blen <= sys.maxsize - Vlen, raising=exc.ValueError)

    ensure(
        Blen + Vlen <= maxmem,
        "Memory limit would be exceeded with the chosen n, r, p",
        raising=exc.ValueError,
    )
+
+
def nacl_bindings_pick_scrypt_params(
    opslimit: int, memlimit: int
) -> Tuple[int, int, int]:
    """Python implementation of libsodium's pickparams.

    Returns ``(n_log2, r, p)`` for the given operation and memory limits.
    """
    # libsodium never runs scrypt below this operation count.
    opslimit = max(opslimit, 32768)
    r = 8

    if opslimit < memlimit // 32:
        # CPU-limited: single lane; pick N from the ops budget.
        p = 1
        maxn = opslimit // (4 * r)
        for n_log2 in range(1, 63):  # pragma: no branch
            if (2**n_log2) > (maxn // 2):
                break
    else:
        # Memory-limited: pick N from the memory budget, then spend the
        # remaining ops budget on parallelism.
        maxn = memlimit // (r * 128)
        for n_log2 in range(1, 63):  # pragma: no branch
            if (2**n_log2) > maxn // 2:
                break

        maxrp = min((opslimit // 4) // (2**n_log2), 0x3FFFFFFF)
        p = maxrp // r

    return n_log2, r, p
+
+
def crypto_pwhash_scryptsalsa208sha256_ll(
    passwd: bytes,
    salt: bytes,
    n: int,
    r: int,
    p: int,
    dklen: int = 64,
    maxmem: int = SCRYPT_MAX_MEM,
) -> bytes:
    """
    Derive ``dklen`` bytes of key material from ``passwd`` and ``salt``
    with scrypt, using explicitly chosen cost parameters.

    :param bytes passwd:
    :param bytes salt: *must* be *exactly* :py:const:`.SALTBYTES` long
    :param int n: cost factor
    :param int r: block size,
    :param int p: the parallelism factor
    :param int dklen: length of the derived key
    :param int maxmem: the maximum available memory available for scrypt's
        operations
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.
    """
    ensure(
        has_crypto_pwhash_scryptsalsa208sha256,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    # Cheap type validation first ...
    for cost_arg in (n, r, p):
        ensure(isinstance(cost_arg, int), raising=TypeError)
    for byte_arg in (passwd, salt):
        ensure(isinstance(byte_arg, bytes), raising=TypeError)

    # ... then the (more involved) memory-budget validation.
    _check_memory_occupation(n, r, p, maxmem)

    out = ffi.new("uint8_t[]", dklen)
    status = lib.crypto_pwhash_scryptsalsa208sha256_ll(
        passwd, len(passwd), salt, len(salt), n, r, p, out, dklen
    )
    ensure(
        status == 0,
        "Unexpected failure in key derivation",
        raising=exc.RuntimeError,
    )

    return ffi.buffer(ffi.cast("char *", out), dklen)[:]
+
+
def crypto_pwhash_scryptsalsa208sha256_str(
    passwd: bytes,
    opslimit: int = SCRYPT_OPSLIMIT_INTERACTIVE,
    memlimit: int = SCRYPT_MEMLIMIT_INTERACTIVE,
) -> bytes:
    """
    Hash ``passwd`` with scrypt and a random salt, returning a string
    representation which embeds the salt and the tuning parameters.

    The returned string can be directly stored as a password hash.

    See :py:func:`.crypto_pwhash_scryptsalsa208sha256` for a short
    discussion about ``opslimit`` and ``memlimit`` values.

    :param bytes passwd:
    :param int opslimit:
    :param int memlimit:
    :return: serialized key hash, including salt and tuning parameters
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.
    """
    ensure(
        has_crypto_pwhash_scryptsalsa208sha256,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    serialized = ffi.new("char[]", SCRYPT_STRBYTES)
    status = lib.crypto_pwhash_scryptsalsa208sha256_str(
        serialized, passwd, len(passwd), opslimit, memlimit
    )
    ensure(
        status == 0,
        "Unexpected failure in password hashing",
        raising=exc.RuntimeError,
    )

    # ffi.string trims the buffer at the C string's NUL terminator.
    return ffi.string(serialized)
+
+
def crypto_pwhash_scryptsalsa208sha256_str_verify(
    passwd_hash: bytes, passwd: bytes
) -> bool:
    """
    Verifies the ``passwd`` against the ``passwd_hash`` that was generated.
    Returns True or False depending on the success

    :param passwd_hash: bytes
    :param passwd: bytes
    :rtype: boolean
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.
    """
    ensure(
        has_crypto_pwhash_scryptsalsa208sha256,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )
    # STRBYTES counts the trailing NUL of the C string, hence the -1.
    ensure(
        len(passwd_hash) == SCRYPT_STRBYTES - 1,
        "Invalid password hash",
        raising=exc.ValueError,
    )

    status = lib.crypto_pwhash_scryptsalsa208sha256_str_verify(
        passwd_hash, passwd, len(passwd)
    )
    ensure(status == 0, "Wrong password", raising=exc.InvalidkeyError)
    # A mismatch raised above, so reaching here means success.
    return True
+
+
def _check_argon2_limits_alg(opslimit: int, memlimit: int, alg: int) -> None:
    """
    Validate ``opslimit`` and ``memlimit`` against the bounds of the
    chosen argon2 variant, raising ``exc.ValueError`` when out of range
    and ``exc.TypeError`` for an unknown ``alg``.
    """
    # Resolve the per-algorithm bounds first; unknown algorithms are
    # rejected before any limit check, as in the original layout.
    if alg == crypto_pwhash_ALG_ARGON2I13:
        mem_min = crypto_pwhash_argon2i_MEMLIMIT_MIN
        mem_max = crypto_pwhash_argon2i_MEMLIMIT_MAX
        ops_min = crypto_pwhash_argon2i_OPSLIMIT_MIN
        ops_max = crypto_pwhash_argon2i_OPSLIMIT_MAX
    elif alg == crypto_pwhash_ALG_ARGON2ID13:
        mem_min = crypto_pwhash_argon2id_MEMLIMIT_MIN
        mem_max = crypto_pwhash_argon2id_MEMLIMIT_MAX
        ops_min = crypto_pwhash_argon2id_OPSLIMIT_MIN
        ops_max = crypto_pwhash_argon2id_OPSLIMIT_MAX
    else:
        raise exc.TypeError("Unsupported algorithm")

    if memlimit < mem_min:
        raise exc.ValueError(
            "memlimit must be at least {} bytes".format(mem_min)
        )
    elif memlimit > mem_max:
        raise exc.ValueError(
            "memlimit must be at most {} bytes".format(mem_max)
        )
    if opslimit < ops_min:
        raise exc.ValueError("opslimit must be at least {}".format(ops_min))
    elif opslimit > ops_max:
        raise exc.ValueError("opslimit must be at most {}".format(ops_max))
+
+
def crypto_pwhash_alg(
    outlen: int,
    passwd: bytes,
    salt: bytes,
    opslimit: int,
    memlimit: int,
    alg: int,
) -> bytes:
    """
    Derive a raw cryptographic key using the ``passwd`` and the ``salt``
    given as input to the ``alg`` algorithm.

    :param outlen: the length of the derived key
    :type outlen: int
    :param passwd: The input password
    :type passwd: bytes
    :param salt:
    :type salt: bytes
    :param opslimit: computational cost
    :type opslimit: int
    :param memlimit: memory cost
    :type memlimit: int
    :param alg: algorithm identifier
    :type alg: int
    :return: derived key
    :rtype: bytes
    """
    # Type checks, in the same order as the original implementation.
    ensure(isinstance(outlen, int), raising=exc.TypeError)
    ensure(isinstance(opslimit, int), raising=exc.TypeError)
    ensure(isinstance(memlimit, int), raising=exc.TypeError)
    ensure(isinstance(alg, int), raising=exc.TypeError)
    ensure(isinstance(passwd, bytes), raising=exc.TypeError)

    if len(salt) != crypto_pwhash_SALTBYTES:
        raise exc.ValueError(
            "salt must be exactly {} bytes long".format(
                crypto_pwhash_SALTBYTES
            )
        )
    if outlen < crypto_pwhash_BYTES_MIN:
        raise exc.ValueError(
            "derived key must be at least {} bytes long".format(
                crypto_pwhash_BYTES_MIN
            )
        )
    if outlen > crypto_pwhash_BYTES_MAX:
        raise exc.ValueError(
            "derived key must be at most {} bytes long".format(
                crypto_pwhash_BYTES_MAX
            )
        )

    _check_argon2_limits_alg(opslimit, memlimit, alg)

    derived = ffi.new("unsigned char[]", outlen)
    status = lib.crypto_pwhash(
        derived, outlen, passwd, len(passwd), salt, opslimit, memlimit, alg
    )
    ensure(
        status == 0,
        "Unexpected failure in key derivation",
        raising=exc.RuntimeError,
    )

    return ffi.buffer(derived, outlen)[:]
+
+
def crypto_pwhash_str_alg(
    passwd: bytes,
    opslimit: int,
    memlimit: int,
    alg: int,
) -> bytes:
    """
    Hash ``passwd`` with a freshly generated random salt and return a
    string representation embedding the salt, the tuning parameters and
    the used algorithm.

    :param passwd: The input password
    :type passwd: bytes
    :param opslimit: computational cost
    :type opslimit: int
    :param memlimit: memory cost
    :type memlimit: int
    :param alg: The algorithm to use
    :type alg: int
    :return: serialized derived key and parameters
    :rtype: bytes
    """
    ensure(isinstance(opslimit, int), raising=TypeError)
    ensure(isinstance(memlimit, int), raising=TypeError)
    ensure(isinstance(passwd, bytes), raising=TypeError)

    _check_argon2_limits_alg(opslimit, memlimit, alg)

    # 128-byte buffer for the serialized hash (presumably covering
    # libsodium's crypto_pwhash_STRBYTES — TODO confirm).
    serialized = ffi.new("char[]", 128)
    status = lib.crypto_pwhash_str_alg(
        serialized, passwd, len(passwd), opslimit, memlimit, alg
    )
    ensure(
        status == 0,
        "Unexpected failure in key derivation",
        raising=exc.RuntimeError,
    )

    # ffi.string trims the buffer at the C string's NUL terminator.
    return ffi.string(serialized)
+
+
def crypto_pwhash_str_verify(passwd_hash: bytes, passwd: bytes) -> bool:
    """
    Verifies the ``passwd`` against a given password hash.

    Returns True on success, raises InvalidkeyError on failure

    :param passwd_hash: saved password hash
    :type passwd_hash: bytes
    :param passwd: password to be checked
    :type passwd: bytes
    :return: success
    :rtype: boolean
    """
    ensure(isinstance(passwd_hash, bytes), raising=TypeError)
    ensure(isinstance(passwd, bytes), raising=TypeError)
    # The serialized form may be at most 127 bytes (plus the NUL
    # terminator added on the C side).
    ensure(
        len(passwd_hash) <= 127,
        "Hash must be at most 127 bytes long",
        raising=exc.ValueError,
    )

    status = lib.crypto_pwhash_str_verify(passwd_hash, passwd, len(passwd))
    ensure(status == 0, "Wrong password", raising=exc.InvalidkeyError)
    # A mismatch raised above, so reaching here means success.
    return True
+
+
# Backwards-compatible alias: the generic str_verify handles argon2i
# serialized hashes as well.
crypto_pwhash_argon2i_str_verify = crypto_pwhash_str_verify
diff --git a/lib/nacl/bindings/crypto_scalarmult.py b/lib/nacl/bindings/crypto_scalarmult.py
new file mode 100644
index 0000000..ca4a281
--- /dev/null
+++ b/lib/nacl/bindings/crypto_scalarmult.py
@@ -0,0 +1,240 @@
+# Copyright 2013-2018 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
# Minimal builds of libsodium omit the ed25519 scalarmult functions;
# probe once at import time.
has_crypto_scalarmult_ed25519 = bool(lib.PYNACL_HAS_CRYPTO_SCALARMULT_ED25519)

crypto_scalarmult_BYTES: int = lib.crypto_scalarmult_bytes()
crypto_scalarmult_SCALARBYTES: int = lib.crypto_scalarmult_scalarbytes()

# Placeholders used under a minimal build; overwritten below otherwise.
crypto_scalarmult_ed25519_BYTES = 0
crypto_scalarmult_ed25519_SCALARBYTES = 0

if has_crypto_scalarmult_ed25519:
    crypto_scalarmult_ed25519_BYTES = lib.crypto_scalarmult_ed25519_bytes()
    crypto_scalarmult_ed25519_SCALARBYTES = (
        lib.crypto_scalarmult_ed25519_scalarbytes()
    )
+
+
def crypto_scalarmult_base(n: bytes) -> bytes:
    """
    Computes and returns the scalar product of a standard group element and an
    integer ``n``.

    :param n: bytes
    :rtype: bytes
    """
    q = ffi.new("unsigned char[]", crypto_scalarmult_BYTES)

    rc = lib.crypto_scalarmult_base(q, n)
    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)

    # Slice with the same constant used for the allocation: the output is
    # a group element of crypto_scalarmult_BYTES. (The previous code
    # sliced with crypto_scalarmult_SCALARBYTES, which names the scalar
    # size; the two are equal for curve25519, but BYTES is the correct
    # quantity for the returned point.)
    return ffi.buffer(q, crypto_scalarmult_BYTES)[:]
+
+
def crypto_scalarmult(n: bytes, p: bytes) -> bytes:
    """
    Computes and returns the scalar product of the given group element and an
    integer ``n``.

    :param p: bytes
    :param n: bytes
    :rtype: bytes
    """
    q = ffi.new("unsigned char[]", crypto_scalarmult_BYTES)

    rc = lib.crypto_scalarmult(q, n, p)
    ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)

    # Slice with the same constant used for the allocation (the previous
    # code used crypto_scalarmult_SCALARBYTES, which names the scalar
    # size; equal for curve25519, but BYTES is the output-point size).
    return ffi.buffer(q, crypto_scalarmult_BYTES)[:]
+
+
def crypto_scalarmult_ed25519_base(n: bytes) -> bytes:
    """
    Computes and returns the scalar product of a standard group element and an
    integer ``n`` on the edwards25519 curve.

    :param n: a :py:data:`.crypto_scalarmult_ed25519_SCALARBYTES` long bytes
        sequence representing a scalar
    :type n: bytes
    :return: a point on the edwards25519 curve, represented as a
        :py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.
    """
    ensure(
        has_crypto_scalarmult_ed25519,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    scalar_ok = (
        isinstance(n, bytes)
        and len(n) == crypto_scalarmult_ed25519_SCALARBYTES
    )
    ensure(
        scalar_ok,
        "Input must be a {} long bytes sequence".format(
            "crypto_scalarmult_ed25519_SCALARBYTES"
        ),
        raising=exc.TypeError,
    )

    point = ffi.new("unsigned char[]", crypto_scalarmult_ed25519_BYTES)
    status = lib.crypto_scalarmult_ed25519_base(point, n)
    ensure(status == 0, "Unexpected library error", raising=exc.RuntimeError)

    return ffi.buffer(point, crypto_scalarmult_ed25519_BYTES)[:]
+
+
def crypto_scalarmult_ed25519_base_noclamp(n: bytes) -> bytes:
    """
    Computes and returns the scalar product of a standard group element and an
    integer ``n`` on the edwards25519 curve. The integer ``n`` is not clamped.

    :param n: a :py:data:`.crypto_scalarmult_ed25519_SCALARBYTES` long bytes
        sequence representing a scalar
    :type n: bytes
    :return: a point on the edwards25519 curve, represented as a
        :py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.
    """
    ensure(
        has_crypto_scalarmult_ed25519,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    scalar_ok = (
        isinstance(n, bytes)
        and len(n) == crypto_scalarmult_ed25519_SCALARBYTES
    )
    ensure(
        scalar_ok,
        "Input must be a {} long bytes sequence".format(
            "crypto_scalarmult_ed25519_SCALARBYTES"
        ),
        raising=exc.TypeError,
    )

    point = ffi.new("unsigned char[]", crypto_scalarmult_ed25519_BYTES)
    status = lib.crypto_scalarmult_ed25519_base_noclamp(point, n)
    ensure(status == 0, "Unexpected library error", raising=exc.RuntimeError)

    return ffi.buffer(point, crypto_scalarmult_ed25519_BYTES)[:]
+
+
def crypto_scalarmult_ed25519(n: bytes, p: bytes) -> bytes:
    """
    Computes and returns the scalar product of a *clamped* integer ``n``
    and the given group element on the edwards25519 curve.
    The scalar is clamped, as done in the public key generation case,
    by setting to zero the bits in position [0, 1, 2, 255] and setting
    to one the bit in position 254.

    :param n: a :py:data:`.crypto_scalarmult_ed25519_SCALARBYTES` long bytes
        sequence representing a scalar
    :type n: bytes
    :param p: a :py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
        representing a point on the edwards25519 curve
    :type p: bytes
    :return: a point on the edwards25519 curve, represented as a
        :py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.
    """
    ensure(
        has_crypto_scalarmult_ed25519,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    scalar_ok = (
        isinstance(n, bytes)
        and len(n) == crypto_scalarmult_ed25519_SCALARBYTES
    )
    ensure(
        scalar_ok,
        "Input must be a {} long bytes sequence".format(
            "crypto_scalarmult_ed25519_SCALARBYTES"
        ),
        raising=exc.TypeError,
    )

    point_ok = (
        isinstance(p, bytes) and len(p) == crypto_scalarmult_ed25519_BYTES
    )
    ensure(
        point_ok,
        "Input must be a {} long bytes sequence".format(
            "crypto_scalarmult_ed25519_BYTES"
        ),
        raising=exc.TypeError,
    )

    result = ffi.new("unsigned char[]", crypto_scalarmult_ed25519_BYTES)
    status = lib.crypto_scalarmult_ed25519(result, n, p)
    ensure(status == 0, "Unexpected library error", raising=exc.RuntimeError)

    return ffi.buffer(result, crypto_scalarmult_ed25519_BYTES)[:]
+
+
def crypto_scalarmult_ed25519_noclamp(n: bytes, p: bytes) -> bytes:
    """
    Computes and returns the scalar product of an integer ``n``
    and the given group element on the edwards25519 curve. The integer
    ``n`` is not clamped.

    :param n: a :py:data:`.crypto_scalarmult_ed25519_SCALARBYTES` long bytes
        sequence representing a scalar
    :type n: bytes
    :param p: a :py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
        representing a point on the edwards25519 curve
    :type p: bytes
    :return: a point on the edwards25519 curve, represented as a
        :py:data:`.crypto_scalarmult_ed25519_BYTES` long bytes sequence
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.
    """
    ensure(
        has_crypto_scalarmult_ed25519,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    scalar_ok = (
        isinstance(n, bytes)
        and len(n) == crypto_scalarmult_ed25519_SCALARBYTES
    )
    ensure(
        scalar_ok,
        "Input must be a {} long bytes sequence".format(
            "crypto_scalarmult_ed25519_SCALARBYTES"
        ),
        raising=exc.TypeError,
    )

    point_ok = (
        isinstance(p, bytes) and len(p) == crypto_scalarmult_ed25519_BYTES
    )
    ensure(
        point_ok,
        "Input must be a {} long bytes sequence".format(
            "crypto_scalarmult_ed25519_BYTES"
        ),
        raising=exc.TypeError,
    )

    result = ffi.new("unsigned char[]", crypto_scalarmult_ed25519_BYTES)
    status = lib.crypto_scalarmult_ed25519_noclamp(result, n, p)
    ensure(status == 0, "Unexpected library error", raising=exc.RuntimeError)

    return ffi.buffer(result, crypto_scalarmult_ed25519_BYTES)[:]
diff --git a/lib/nacl/bindings/crypto_secretbox.py b/lib/nacl/bindings/crypto_secretbox.py
new file mode 100644
index 0000000..d1ad113
--- /dev/null
+++ b/lib/nacl/bindings/crypto_secretbox.py
@@ -0,0 +1,159 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+crypto_secretbox_KEYBYTES: int = lib.crypto_secretbox_keybytes()
+crypto_secretbox_NONCEBYTES: int = lib.crypto_secretbox_noncebytes()
+crypto_secretbox_ZEROBYTES: int = lib.crypto_secretbox_zerobytes()
+crypto_secretbox_BOXZEROBYTES: int = lib.crypto_secretbox_boxzerobytes()
+crypto_secretbox_MACBYTES: int = lib.crypto_secretbox_macbytes()
+crypto_secretbox_MESSAGEBYTES_MAX: int = (
+ lib.crypto_secretbox_messagebytes_max()
+)
+
+
+def crypto_secretbox(message: bytes, nonce: bytes, key: bytes) -> bytes:
+ """
+ Encrypts and returns the message ``message`` with the secret ``key`` and
+ the nonce ``nonce``.
+
+ :param message: bytes
+ :param nonce: bytes
+ :param key: bytes
+ :rtype: bytes
+ """
+ if len(key) != crypto_secretbox_KEYBYTES:
+ raise exc.ValueError("Invalid key")
+
+ if len(nonce) != crypto_secretbox_NONCEBYTES:
+ raise exc.ValueError("Invalid nonce")
+
+ padded = b"\x00" * crypto_secretbox_ZEROBYTES + message
+ ciphertext = ffi.new("unsigned char[]", len(padded))
+
+ res = lib.crypto_secretbox(ciphertext, padded, len(padded), nonce, key)
+ ensure(res == 0, "Encryption failed", raising=exc.CryptoError)
+
+ ciphertext = ffi.buffer(ciphertext, len(padded))
+ return ciphertext[crypto_secretbox_BOXZEROBYTES:]
+
+
+def crypto_secretbox_open(
+ ciphertext: bytes, nonce: bytes, key: bytes
+) -> bytes:
+ """
+    Decrypts and returns the encrypted message ``ciphertext`` with the secret
+ ``key`` and the nonce ``nonce``.
+
+ :param ciphertext: bytes
+ :param nonce: bytes
+ :param key: bytes
+ :rtype: bytes
+ """
+ if len(key) != crypto_secretbox_KEYBYTES:
+ raise exc.ValueError("Invalid key")
+
+ if len(nonce) != crypto_secretbox_NONCEBYTES:
+ raise exc.ValueError("Invalid nonce")
+
+ padded = b"\x00" * crypto_secretbox_BOXZEROBYTES + ciphertext
+ plaintext = ffi.new("unsigned char[]", len(padded))
+
+ res = lib.crypto_secretbox_open(plaintext, padded, len(padded), nonce, key)
+ ensure(
+ res == 0,
+ "Decryption failed. Ciphertext failed verification",
+ raising=exc.CryptoError,
+ )
+
+ plaintext = ffi.buffer(plaintext, len(padded))
+ return plaintext[crypto_secretbox_ZEROBYTES:]
+
+
+def crypto_secretbox_easy(message: bytes, nonce: bytes, key: bytes) -> bytes:
+ """
+ Encrypts and returns the message ``message`` with the secret ``key`` and
+ the nonce ``nonce``.
+
+ :param message: bytes
+ :param nonce: bytes
+ :param key: bytes
+ :rtype: bytes
+ """
+ if len(key) != crypto_secretbox_KEYBYTES:
+ raise exc.ValueError("Invalid key")
+
+ if len(nonce) != crypto_secretbox_NONCEBYTES:
+ raise exc.ValueError("Invalid nonce")
+
+ _mlen = len(message)
+ _clen = crypto_secretbox_MACBYTES + _mlen
+
+ ciphertext = ffi.new("unsigned char[]", _clen)
+
+ res = lib.crypto_secretbox_easy(ciphertext, message, _mlen, nonce, key)
+ ensure(res == 0, "Encryption failed", raising=exc.CryptoError)
+
+ ciphertext = ffi.buffer(ciphertext, _clen)
+ return ciphertext[:]
+
+
+def crypto_secretbox_open_easy(
+ ciphertext: bytes, nonce: bytes, key: bytes
+) -> bytes:
+ """
+    Decrypts and returns the encrypted message ``ciphertext`` with the secret
+ ``key`` and the nonce ``nonce``.
+
+ :param ciphertext: bytes
+ :param nonce: bytes
+ :param key: bytes
+ :rtype: bytes
+ """
+ if len(key) != crypto_secretbox_KEYBYTES:
+ raise exc.ValueError("Invalid key")
+
+ if len(nonce) != crypto_secretbox_NONCEBYTES:
+ raise exc.ValueError("Invalid nonce")
+
+ _clen = len(ciphertext)
+
+ ensure(
+ _clen >= crypto_secretbox_MACBYTES,
+ "Input ciphertext must be at least {} long".format(
+ crypto_secretbox_MACBYTES
+ ),
+ raising=exc.TypeError,
+ )
+
+ _mlen = _clen - crypto_secretbox_MACBYTES
+
+ plaintext = ffi.new("unsigned char[]", max(1, _mlen))
+
+ res = lib.crypto_secretbox_open_easy(
+ plaintext, ciphertext, _clen, nonce, key
+ )
+ ensure(
+ res == 0,
+ "Decryption failed. Ciphertext failed verification",
+ raising=exc.CryptoError,
+ )
+
+ plaintext = ffi.buffer(plaintext, _mlen)
+ return plaintext[:]
diff --git a/lib/nacl/bindings/crypto_secretstream.py b/lib/nacl/bindings/crypto_secretstream.py
new file mode 100644
index 0000000..59b074c
--- /dev/null
+++ b/lib/nacl/bindings/crypto_secretstream.py
@@ -0,0 +1,358 @@
+# Copyright 2013-2018 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple, Union, cast
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+crypto_secretstream_xchacha20poly1305_ABYTES: int = (
+ lib.crypto_secretstream_xchacha20poly1305_abytes()
+)
+crypto_secretstream_xchacha20poly1305_HEADERBYTES: int = (
+ lib.crypto_secretstream_xchacha20poly1305_headerbytes()
+)
+crypto_secretstream_xchacha20poly1305_KEYBYTES: int = (
+ lib.crypto_secretstream_xchacha20poly1305_keybytes()
+)
+crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX: int = (
+ lib.crypto_secretstream_xchacha20poly1305_messagebytes_max()
+)
+crypto_secretstream_xchacha20poly1305_STATEBYTES: int = (
+ lib.crypto_secretstream_xchacha20poly1305_statebytes()
+)
+
+
+crypto_secretstream_xchacha20poly1305_TAG_MESSAGE: int = (
+ lib.crypto_secretstream_xchacha20poly1305_tag_message()
+)
+crypto_secretstream_xchacha20poly1305_TAG_PUSH: int = (
+ lib.crypto_secretstream_xchacha20poly1305_tag_push()
+)
+crypto_secretstream_xchacha20poly1305_TAG_REKEY: int = (
+ lib.crypto_secretstream_xchacha20poly1305_tag_rekey()
+)
+crypto_secretstream_xchacha20poly1305_TAG_FINAL: int = (
+ lib.crypto_secretstream_xchacha20poly1305_tag_final()
+)
+
+
+def crypto_secretstream_xchacha20poly1305_keygen() -> bytes:
+ """
+ Generate a key for use with
+ :func:`.crypto_secretstream_xchacha20poly1305_init_push`.
+
+ """
+ keybuf = ffi.new(
+ "unsigned char[]",
+ crypto_secretstream_xchacha20poly1305_KEYBYTES,
+ )
+ lib.crypto_secretstream_xchacha20poly1305_keygen(keybuf)
+ return ffi.buffer(keybuf)[:]
+
+
+class crypto_secretstream_xchacha20poly1305_state:
+ """
+ An object wrapping the crypto_secretstream_xchacha20poly1305 state.
+
+ """
+
+ __slots__ = ["statebuf", "rawbuf", "tagbuf"]
+
+ def __init__(self) -> None:
+ """Initialize a clean state object."""
+ ByteString = Union[bytes, bytearray, memoryview]
+ self.statebuf: ByteString = ffi.new(
+ "unsigned char[]",
+ crypto_secretstream_xchacha20poly1305_STATEBYTES,
+ )
+
+ self.rawbuf: Optional[ByteString] = None
+ self.tagbuf: Optional[ByteString] = None
+
+
+def crypto_secretstream_xchacha20poly1305_init_push(
+ state: crypto_secretstream_xchacha20poly1305_state, key: bytes
+) -> bytes:
+ """
+ Initialize a crypto_secretstream_xchacha20poly1305 encryption buffer.
+
+ :param state: a secretstream state object
+ :type state: crypto_secretstream_xchacha20poly1305_state
+ :param key: must be
+ :data:`.crypto_secretstream_xchacha20poly1305_KEYBYTES` long
+ :type key: bytes
+ :return: header
+ :rtype: bytes
+
+ """
+ ensure(
+ isinstance(state, crypto_secretstream_xchacha20poly1305_state),
+ "State must be a crypto_secretstream_xchacha20poly1305_state object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ isinstance(key, bytes),
+ "Key must be a bytes sequence",
+ raising=exc.TypeError,
+ )
+ ensure(
+ len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES,
+ "Invalid key length",
+ raising=exc.ValueError,
+ )
+
+ headerbuf = ffi.new(
+ "unsigned char []",
+ crypto_secretstream_xchacha20poly1305_HEADERBYTES,
+ )
+
+ rc = lib.crypto_secretstream_xchacha20poly1305_init_push(
+ state.statebuf, headerbuf, key
+ )
+ ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+
+ return ffi.buffer(headerbuf)[:]
+
+
+def crypto_secretstream_xchacha20poly1305_push(
+ state: crypto_secretstream_xchacha20poly1305_state,
+ m: bytes,
+ ad: Optional[bytes] = None,
+ tag: int = crypto_secretstream_xchacha20poly1305_TAG_MESSAGE,
+) -> bytes:
+ """
+ Add an encrypted message to the secret stream.
+
+ :param state: a secretstream state object
+ :type state: crypto_secretstream_xchacha20poly1305_state
+ :param m: the message to encrypt, the maximum length of an individual
+ message is
+ :data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX`.
+ :type m: bytes
+ :param ad: additional data to include in the authentication tag
+ :type ad: bytes or None
+ :param tag: the message tag, usually
+ :data:`.crypto_secretstream_xchacha20poly1305_TAG_MESSAGE` or
+ :data:`.crypto_secretstream_xchacha20poly1305_TAG_FINAL`.
+ :type tag: int
+ :return: ciphertext
+ :rtype: bytes
+
+ """
+ ensure(
+ isinstance(state, crypto_secretstream_xchacha20poly1305_state),
+ "State must be a crypto_secretstream_xchacha20poly1305_state object",
+ raising=exc.TypeError,
+ )
+ ensure(isinstance(m, bytes), "Message is not bytes", raising=exc.TypeError)
+ ensure(
+ len(m) <= crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX,
+ "Message is too long",
+ raising=exc.ValueError,
+ )
+ ensure(
+ ad is None or isinstance(ad, bytes),
+ "Additional data must be bytes or None",
+ raising=exc.TypeError,
+ )
+
+ clen = len(m) + crypto_secretstream_xchacha20poly1305_ABYTES
+ if state.rawbuf is None or len(state.rawbuf) < clen:
+ state.rawbuf = ffi.new("unsigned char[]", clen)
+
+ if ad is None:
+ ad = ffi.NULL
+ adlen = 0
+ else:
+ adlen = len(ad)
+
+ rc = lib.crypto_secretstream_xchacha20poly1305_push(
+ state.statebuf,
+ state.rawbuf,
+ ffi.NULL,
+ m,
+ len(m),
+ ad,
+ adlen,
+ tag,
+ )
+ ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+
+ return ffi.buffer(state.rawbuf, clen)[:]
+
+
+def crypto_secretstream_xchacha20poly1305_init_pull(
+ state: crypto_secretstream_xchacha20poly1305_state,
+ header: bytes,
+ key: bytes,
+) -> None:
+ """
+ Initialize a crypto_secretstream_xchacha20poly1305 decryption buffer.
+
+ :param state: a secretstream state object
+ :type state: crypto_secretstream_xchacha20poly1305_state
+ :param header: must be
+ :data:`.crypto_secretstream_xchacha20poly1305_HEADERBYTES` long
+ :type header: bytes
+ :param key: must be
+ :data:`.crypto_secretstream_xchacha20poly1305_KEYBYTES` long
+ :type key: bytes
+
+ """
+ ensure(
+ isinstance(state, crypto_secretstream_xchacha20poly1305_state),
+ "State must be a crypto_secretstream_xchacha20poly1305_state object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ isinstance(header, bytes),
+ "Header must be a bytes sequence",
+ raising=exc.TypeError,
+ )
+ ensure(
+ len(header) == crypto_secretstream_xchacha20poly1305_HEADERBYTES,
+ "Invalid header length",
+ raising=exc.ValueError,
+ )
+ ensure(
+ isinstance(key, bytes),
+ "Key must be a bytes sequence",
+ raising=exc.TypeError,
+ )
+ ensure(
+ len(key) == crypto_secretstream_xchacha20poly1305_KEYBYTES,
+ "Invalid key length",
+ raising=exc.ValueError,
+ )
+
+ if state.tagbuf is None:
+ state.tagbuf = ffi.new("unsigned char *")
+
+ rc = lib.crypto_secretstream_xchacha20poly1305_init_pull(
+ state.statebuf, header, key
+ )
+ ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+
+
+def crypto_secretstream_xchacha20poly1305_pull(
+ state: crypto_secretstream_xchacha20poly1305_state,
+ c: bytes,
+ ad: Optional[bytes] = None,
+) -> Tuple[bytes, int]:
+ """
+ Read a decrypted message from the secret stream.
+
+ :param state: a secretstream state object
+ :type state: crypto_secretstream_xchacha20poly1305_state
+ :param c: the ciphertext to decrypt, the maximum length of an individual
+ ciphertext is
+ :data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX` +
+ :data:`.crypto_secretstream_xchacha20poly1305_ABYTES`.
+ :type c: bytes
+ :param ad: additional data to include in the authentication tag
+ :type ad: bytes or None
+ :return: (message, tag)
+ :rtype: (bytes, int)
+
+ """
+ ensure(
+ isinstance(state, crypto_secretstream_xchacha20poly1305_state),
+ "State must be a crypto_secretstream_xchacha20poly1305_state object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ state.tagbuf is not None,
+ (
+ "State must be initialized using "
+ "crypto_secretstream_xchacha20poly1305_init_pull"
+ ),
+ raising=exc.ValueError,
+ )
+ ensure(
+ isinstance(c, bytes),
+ "Ciphertext is not bytes",
+ raising=exc.TypeError,
+ )
+ ensure(
+ len(c) >= crypto_secretstream_xchacha20poly1305_ABYTES,
+ "Ciphertext is too short",
+ raising=exc.ValueError,
+ )
+ ensure(
+ len(c)
+ <= (
+ crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX
+ + crypto_secretstream_xchacha20poly1305_ABYTES
+ ),
+ "Ciphertext is too long",
+ raising=exc.ValueError,
+ )
+ ensure(
+ ad is None or isinstance(ad, bytes),
+ "Additional data must be bytes or None",
+ raising=exc.TypeError,
+ )
+
+ mlen = len(c) - crypto_secretstream_xchacha20poly1305_ABYTES
+ if state.rawbuf is None or len(state.rawbuf) < mlen:
+ state.rawbuf = ffi.new("unsigned char[]", mlen)
+
+ if ad is None:
+ ad = ffi.NULL
+ adlen = 0
+ else:
+ adlen = len(ad)
+
+ rc = lib.crypto_secretstream_xchacha20poly1305_pull(
+ state.statebuf,
+ state.rawbuf,
+ ffi.NULL,
+ state.tagbuf,
+ c,
+ len(c),
+ ad,
+ adlen,
+ )
+ ensure(rc == 0, "Unexpected failure", raising=exc.RuntimeError)
+
+ # Cast safety: we `ensure` above that `state.tagbuf is not None`.
+ return (
+ ffi.buffer(state.rawbuf, mlen)[:],
+ int(cast(bytes, state.tagbuf)[0]),
+ )
+
+
+def crypto_secretstream_xchacha20poly1305_rekey(
+ state: crypto_secretstream_xchacha20poly1305_state,
+) -> None:
+ """
+ Explicitly change the encryption key in the stream.
+
+ Normally the stream is re-keyed as needed or an explicit ``tag`` of
+ :data:`.crypto_secretstream_xchacha20poly1305_TAG_REKEY` is added to a
+ message to ensure forward secrecy, but this method can be used instead
+ if the re-keying is controlled without adding the tag.
+
+ :param state: a secretstream state object
+ :type state: crypto_secretstream_xchacha20poly1305_state
+
+ """
+ ensure(
+ isinstance(state, crypto_secretstream_xchacha20poly1305_state),
+ "State must be a crypto_secretstream_xchacha20poly1305_state object",
+ raising=exc.TypeError,
+ )
+ lib.crypto_secretstream_xchacha20poly1305_rekey(state.statebuf)
diff --git a/lib/nacl/bindings/crypto_shorthash.py b/lib/nacl/bindings/crypto_shorthash.py
new file mode 100644
index 0000000..8f7d209
--- /dev/null
+++ b/lib/nacl/bindings/crypto_shorthash.py
@@ -0,0 +1,81 @@
+# Copyright 2016 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import nacl.exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+has_crypto_shorthash_siphashx24 = bool(
+ lib.PYNACL_HAS_CRYPTO_SHORTHASH_SIPHASHX24
+)
+
+BYTES: int = lib.crypto_shorthash_siphash24_bytes()
+KEYBYTES: int = lib.crypto_shorthash_siphash24_keybytes()
+
+XBYTES = 0
+XKEYBYTES = 0
+
+if has_crypto_shorthash_siphashx24:
+ XBYTES = lib.crypto_shorthash_siphashx24_bytes()
+ XKEYBYTES = lib.crypto_shorthash_siphashx24_keybytes()
+
+
+def crypto_shorthash_siphash24(data: bytes, key: bytes) -> bytes:
+ """Compute a fast, cryptographic quality, keyed hash of the input data
+
+ :param data:
+ :type data: bytes
+ :param key: len(key) must be equal to
+ :py:data:`.KEYBYTES` (16)
+ :type key: bytes
+ """
+ if len(key) != KEYBYTES:
+ raise exc.ValueError(
+ "Key length must be exactly {} bytes".format(KEYBYTES)
+ )
+ digest = ffi.new("unsigned char[]", BYTES)
+ rc = lib.crypto_shorthash_siphash24(digest, data, len(data), key)
+
+ ensure(rc == 0, raising=exc.RuntimeError)
+ return ffi.buffer(digest, BYTES)[:]
+
+
+def crypto_shorthash_siphashx24(data: bytes, key: bytes) -> bytes:
+ """Compute a fast, cryptographic quality, keyed hash of the input data
+
+ :param data:
+ :type data: bytes
+ :param key: len(key) must be equal to
+ :py:data:`.XKEYBYTES` (16)
+ :type key: bytes
+ :raises nacl.exceptions.UnavailableError: If called when using a
+ minimal build of libsodium.
+ """
+ ensure(
+ has_crypto_shorthash_siphashx24,
+ "Not available in minimal build",
+ raising=exc.UnavailableError,
+ )
+
+ if len(key) != XKEYBYTES:
+ raise exc.ValueError(
+ "Key length must be exactly {} bytes".format(XKEYBYTES)
+ )
+ digest = ffi.new("unsigned char[]", XBYTES)
+ rc = lib.crypto_shorthash_siphashx24(digest, data, len(data), key)
+
+ ensure(rc == 0, raising=exc.RuntimeError)
+ return ffi.buffer(digest, XBYTES)[:]
diff --git a/lib/nacl/bindings/crypto_sign.py b/lib/nacl/bindings/crypto_sign.py
new file mode 100644
index 0000000..f459f6a
--- /dev/null
+++ b/lib/nacl/bindings/crypto_sign.py
@@ -0,0 +1,327 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Tuple
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+crypto_sign_BYTES: int = lib.crypto_sign_bytes()
+# crypto_sign_SEEDBYTES = lib.crypto_sign_seedbytes()
+crypto_sign_SEEDBYTES: int = lib.crypto_sign_secretkeybytes() // 2
+crypto_sign_PUBLICKEYBYTES: int = lib.crypto_sign_publickeybytes()
+crypto_sign_SECRETKEYBYTES: int = lib.crypto_sign_secretkeybytes()
+
+crypto_sign_curve25519_BYTES: int = lib.crypto_box_secretkeybytes()
+
+crypto_sign_ed25519ph_STATEBYTES: int = lib.crypto_sign_ed25519ph_statebytes()
+
+
+def crypto_sign_keypair() -> Tuple[bytes, bytes]:
+ """
+ Returns a randomly generated public key and secret key.
+
+ :rtype: (bytes(public_key), bytes(secret_key))
+ """
+ pk = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES)
+ sk = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES)
+
+ rc = lib.crypto_sign_keypair(pk, sk)
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+ return (
+ ffi.buffer(pk, crypto_sign_PUBLICKEYBYTES)[:],
+ ffi.buffer(sk, crypto_sign_SECRETKEYBYTES)[:],
+ )
+
+
+def crypto_sign_seed_keypair(seed: bytes) -> Tuple[bytes, bytes]:
+ """
+ Computes and returns the public key and secret key using the seed ``seed``.
+
+ :param seed: bytes
+ :rtype: (bytes(public_key), bytes(secret_key))
+ """
+ if len(seed) != crypto_sign_SEEDBYTES:
+ raise exc.ValueError("Invalid seed")
+
+ pk = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES)
+ sk = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES)
+
+ rc = lib.crypto_sign_seed_keypair(pk, sk, seed)
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+ return (
+ ffi.buffer(pk, crypto_sign_PUBLICKEYBYTES)[:],
+ ffi.buffer(sk, crypto_sign_SECRETKEYBYTES)[:],
+ )
+
+
+def crypto_sign(message: bytes, sk: bytes) -> bytes:
+ """
+ Signs the message ``message`` using the secret key ``sk`` and returns the
+ signed message.
+
+ :param message: bytes
+ :param sk: bytes
+ :rtype: bytes
+ """
+ signed = ffi.new("unsigned char[]", len(message) + crypto_sign_BYTES)
+ signed_len = ffi.new("unsigned long long *")
+
+ rc = lib.crypto_sign(signed, signed_len, message, len(message), sk)
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+ return ffi.buffer(signed, signed_len[0])[:]
+
+
+def crypto_sign_open(signed: bytes, pk: bytes) -> bytes:
+ """
+ Verifies the signature of the signed message ``signed`` using the public
+ key ``pk`` and returns the unsigned message.
+
+ :param signed: bytes
+ :param pk: bytes
+ :rtype: bytes
+ """
+ message = ffi.new("unsigned char[]", len(signed))
+ message_len = ffi.new("unsigned long long *")
+
+ if (
+ lib.crypto_sign_open(message, message_len, signed, len(signed), pk)
+ != 0
+ ):
+ raise exc.BadSignatureError("Signature was forged or corrupt")
+
+ return ffi.buffer(message, message_len[0])[:]
+
+
+def crypto_sign_ed25519_pk_to_curve25519(public_key_bytes: bytes) -> bytes:
+ """
+ Converts a public Ed25519 key (encoded as bytes ``public_key_bytes``) to
+ a public Curve25519 key as bytes.
+
+ Raises a ValueError if ``public_key_bytes`` is not of length
+ ``crypto_sign_PUBLICKEYBYTES``
+
+ :param public_key_bytes: bytes
+ :rtype: bytes
+ """
+ if len(public_key_bytes) != crypto_sign_PUBLICKEYBYTES:
+ raise exc.ValueError("Invalid curve public key")
+
+ curve_public_key_len = crypto_sign_curve25519_BYTES
+ curve_public_key = ffi.new("unsigned char[]", curve_public_key_len)
+
+ rc = lib.crypto_sign_ed25519_pk_to_curve25519(
+ curve_public_key, public_key_bytes
+ )
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+ return ffi.buffer(curve_public_key, curve_public_key_len)[:]
+
+
+def crypto_sign_ed25519_sk_to_curve25519(secret_key_bytes: bytes) -> bytes:
+ """
+ Converts a secret Ed25519 key (encoded as bytes ``secret_key_bytes``) to
+ a secret Curve25519 key as bytes.
+
+    Raises a ValueError if ``secret_key_bytes`` is not of length
+ ``crypto_sign_SECRETKEYBYTES``
+
+ :param secret_key_bytes: bytes
+ :rtype: bytes
+ """
+ if len(secret_key_bytes) != crypto_sign_SECRETKEYBYTES:
+ raise exc.ValueError("Invalid curve secret key")
+
+ curve_secret_key_len = crypto_sign_curve25519_BYTES
+ curve_secret_key = ffi.new("unsigned char[]", curve_secret_key_len)
+
+ rc = lib.crypto_sign_ed25519_sk_to_curve25519(
+ curve_secret_key, secret_key_bytes
+ )
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+ return ffi.buffer(curve_secret_key, curve_secret_key_len)[:]
+
+
+def crypto_sign_ed25519_sk_to_pk(secret_key_bytes: bytes) -> bytes:
+ """
+ Extract the public Ed25519 key from a secret Ed25519 key (encoded
+ as bytes ``secret_key_bytes``).
+
+    Raises a ValueError if ``secret_key_bytes`` is not of length
+ ``crypto_sign_SECRETKEYBYTES``
+
+ :param secret_key_bytes: bytes
+ :rtype: bytes
+ """
+ if len(secret_key_bytes) != crypto_sign_SECRETKEYBYTES:
+ raise exc.ValueError("Invalid secret key")
+
+ return secret_key_bytes[crypto_sign_SEEDBYTES:]
+
+
+def crypto_sign_ed25519_sk_to_seed(secret_key_bytes: bytes) -> bytes:
+ """
+ Extract the seed from a secret Ed25519 key (encoded
+ as bytes ``secret_key_bytes``).
+
+    Raises a ValueError if ``secret_key_bytes`` is not of length
+ ``crypto_sign_SECRETKEYBYTES``
+
+ :param secret_key_bytes: bytes
+ :rtype: bytes
+ """
+ if len(secret_key_bytes) != crypto_sign_SECRETKEYBYTES:
+ raise exc.ValueError("Invalid secret key")
+
+ return secret_key_bytes[:crypto_sign_SEEDBYTES]
+
+
+class crypto_sign_ed25519ph_state:
+ """
+ State object wrapping the sha-512 state used in ed25519ph computation
+ """
+
+ __slots__ = ["state"]
+
+ def __init__(self) -> None:
+ self.state: bytes = ffi.new(
+ "unsigned char[]", crypto_sign_ed25519ph_STATEBYTES
+ )
+
+ rc = lib.crypto_sign_ed25519ph_init(self.state)
+
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+
+def crypto_sign_ed25519ph_update(
+ edph: crypto_sign_ed25519ph_state, pmsg: bytes
+) -> None:
+ """
+ Update the hash state wrapped in edph
+
+ :param edph: the ed25519ph state being updated
+ :type edph: crypto_sign_ed25519ph_state
+ :param pmsg: the partial message
+ :type pmsg: bytes
+ :rtype: None
+ """
+ ensure(
+ isinstance(edph, crypto_sign_ed25519ph_state),
+ "edph parameter must be a ed25519ph_state object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ isinstance(pmsg, bytes),
+ "pmsg parameter must be a bytes object",
+ raising=exc.TypeError,
+ )
+ rc = lib.crypto_sign_ed25519ph_update(edph.state, pmsg, len(pmsg))
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+
+def crypto_sign_ed25519ph_final_create(
+ edph: crypto_sign_ed25519ph_state, sk: bytes
+) -> bytes:
+ """
+ Create a signature for the data hashed in edph
+ using the secret key sk
+
+ :param edph: the ed25519ph state for the data
+ being signed
+ :type edph: crypto_sign_ed25519ph_state
+ :param sk: the ed25519 secret key (secret and public part)
+ :type sk: bytes
+ :return: ed25519ph signature
+ :rtype: bytes
+ """
+ ensure(
+ isinstance(edph, crypto_sign_ed25519ph_state),
+ "edph parameter must be a ed25519ph_state object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ isinstance(sk, bytes),
+ "secret key parameter must be a bytes object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ len(sk) == crypto_sign_SECRETKEYBYTES,
+ ("secret key must be {} bytes long").format(
+ crypto_sign_SECRETKEYBYTES
+ ),
+ raising=exc.TypeError,
+ )
+ signature = ffi.new("unsigned char[]", crypto_sign_BYTES)
+ rc = lib.crypto_sign_ed25519ph_final_create(
+ edph.state, signature, ffi.NULL, sk
+ )
+ ensure(rc == 0, "Unexpected library error", raising=exc.RuntimeError)
+
+ return ffi.buffer(signature, crypto_sign_BYTES)[:]
+
+
+def crypto_sign_ed25519ph_final_verify(
+ edph: crypto_sign_ed25519ph_state, signature: bytes, pk: bytes
+) -> bool:
+ """
+ Verify a prehashed signature using the public key pk
+
+ :param edph: the ed25519ph state for the data
+ being verified
+ :type edph: crypto_sign_ed25519ph_state
+ :param signature: the signature being verified
+ :type signature: bytes
+ :param pk: the ed25519 public part of the signing key
+ :type pk: bytes
+ :return: True if the signature is valid
+ :rtype: boolean
+ :raises exc.BadSignatureError: if the signature is not valid
+ """
+ ensure(
+ isinstance(edph, crypto_sign_ed25519ph_state),
+ "edph parameter must be a ed25519ph_state object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ isinstance(signature, bytes),
+ "signature parameter must be a bytes object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ len(signature) == crypto_sign_BYTES,
+ ("signature must be {} bytes long").format(crypto_sign_BYTES),
+ raising=exc.TypeError,
+ )
+ ensure(
+ isinstance(pk, bytes),
+ "public key parameter must be a bytes object",
+ raising=exc.TypeError,
+ )
+ ensure(
+ len(pk) == crypto_sign_PUBLICKEYBYTES,
+ ("public key must be {} bytes long").format(
+ crypto_sign_PUBLICKEYBYTES
+ ),
+ raising=exc.TypeError,
+ )
+ rc = lib.crypto_sign_ed25519ph_final_verify(edph.state, signature, pk)
+ if rc != 0:
+ raise exc.BadSignatureError("Signature was forged or corrupt")
+
+ return True
diff --git a/lib/nacl/bindings/randombytes.py b/lib/nacl/bindings/randombytes.py
new file mode 100644
index 0000000..ed76deb
--- /dev/null
+++ b/lib/nacl/bindings/randombytes.py
@@ -0,0 +1,51 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+
+randombytes_SEEDBYTES: int = lib.randombytes_seedbytes()
+
+
+def randombytes(size: int) -> bytes:
+ """
+ Returns ``size`` number of random bytes from a cryptographically secure
+ random source.
+
+ :param size: int
+ :rtype: bytes
+ """
+ buf = ffi.new("unsigned char[]", size)
+ lib.randombytes(buf, size)
+ return ffi.buffer(buf, size)[:]
+
+
+def randombytes_buf_deterministic(size: int, seed: bytes) -> bytes:
+ """
+ Returns ``size`` number of deterministically generated pseudorandom bytes
+ from a seed
+
+ :param size: int
+ :param seed: bytes
+ :rtype: bytes
+ """
+ if len(seed) != randombytes_SEEDBYTES:
+ raise exc.TypeError(
+ "Deterministic random bytes must be generated from 32 bytes"
+ )
+
+ buf = ffi.new("unsigned char[]", size)
+ lib.randombytes_buf_deterministic(buf, size, seed)
+ return ffi.buffer(buf, size)[:]
diff --git a/lib/nacl/bindings/sodium_core.py b/lib/nacl/bindings/sodium_core.py
new file mode 100644
index 0000000..7ebb84c
--- /dev/null
+++ b/lib/nacl/bindings/sodium_core.py
@@ -0,0 +1,33 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nacl import exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+def _sodium_init() -> None:
+ ensure(
+ lib.sodium_init() != -1,
+ "Could not initialize sodium",
+ raising=exc.RuntimeError,
+ )
+
+
+def sodium_init() -> None:
+ """
+ Initializes sodium, picking the best implementations available for this
+ machine.
+ """
+ ffi.init_once(_sodium_init, "libsodium")
diff --git a/lib/nacl/bindings/utils.py b/lib/nacl/bindings/utils.py
new file mode 100644
index 0000000..0ff22e3
--- /dev/null
+++ b/lib/nacl/bindings/utils.py
@@ -0,0 +1,141 @@
+# Copyright 2013-2017 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import nacl.exceptions as exc
+from nacl._sodium import ffi, lib
+from nacl.exceptions import ensure
+
+
+def sodium_memcmp(inp1: bytes, inp2: bytes) -> bool:
+    """
+    Compare contents of two memory regions in constant time
+
+    :param inp1: first region
+    :param inp2: second region
+    :return: True only if both regions have equal length and contents
+    """
+    ensure(isinstance(inp1, bytes), raising=exc.TypeError)
+    ensure(isinstance(inp2, bytes), raising=exc.TypeError)
+
+    # Copy both inputs into equal-sized buffers (cffi zero-initializes
+    # fresh allocations) so the C comparison always scans the same number
+    # of bytes regardless of the inputs' relative lengths.
+    ln = max(len(inp1), len(inp2))
+
+    buf1 = ffi.new("char []", ln)
+    buf2 = ffi.new("char []", ln)
+
+    ffi.memmove(buf1, inp1, len(inp1))
+    ffi.memmove(buf2, inp2, len(inp2))
+
+    # Equal only if the lengths match AND the padded contents match.
+    eqL = len(inp1) == len(inp2)
+    eqC = lib.sodium_memcmp(buf1, buf2, ln) == 0
+
+    return eqL and eqC
+
+
+def sodium_pad(s: bytes, blocksize: int) -> bytes:
+    """
+    Pad the input bytearray ``s`` to a multiple of ``blocksize``
+    using the ISO/IEC 7816-4 algorithm
+
+    :param s: input bytes string
+    :type s: bytes
+    :param blocksize: target block size; must be a positive integer
+    :type blocksize: int
+    :return: padded string
+    :rtype: bytes
+    :raises nacl.exceptions.ValueError: if ``blocksize`` is not positive
+    :raises nacl.exceptions.CryptoError: if libsodium reports a padding
+        failure
+    """
+    ensure(isinstance(s, bytes), raising=exc.TypeError)
+    ensure(isinstance(blocksize, int), raising=exc.TypeError)
+    if blocksize <= 0:
+        raise exc.ValueError
+    s_len = len(s)
+    # ISO 7816-4 padding adds between 1 and blocksize bytes, so the
+    # worst-case padded length is s_len + blocksize.
+    m_len = s_len + blocksize
+    buf = ffi.new("unsigned char []", m_len)
+    # Out-parameter: libsodium writes the actual padded length here.
+    p_len = ffi.new("size_t []", 1)
+    ffi.memmove(buf, s, s_len)
+    rc = lib.sodium_pad(p_len, buf, s_len, blocksize, m_len)
+    ensure(rc == 0, "Padding failure", raising=exc.CryptoError)
+    return ffi.buffer(buf, p_len[0])[:]
+
+
+def sodium_unpad(s: bytes, blocksize: int) -> bytes:
+    """
+    Remove ISO/IEC 7816-4 padding from the input byte array ``s``
+
+    :param s: input bytes string
+    :type s: bytes
+    :param blocksize: block size the input was padded to
+    :type blocksize: int
+    :return: unpadded string
+    :rtype: bytes
+    :raises nacl.exceptions.CryptoError: if the padding is malformed
+    """
+    ensure(isinstance(s, bytes), raising=exc.TypeError)
+    ensure(isinstance(blocksize, int), raising=exc.TypeError)
+    s_len = len(s)
+    # Out-parameter: libsodium writes the unpadded length here.
+    u_len = ffi.new("size_t []", 1)
+    rc = lib.sodium_unpad(u_len, s, s_len, blocksize)
+    if rc != 0:
+        raise exc.CryptoError("Unpadding failure")
+    return s[: u_len[0]]
+
+
+def sodium_increment(inp: bytes) -> bytes:
+    """
+    Increment the value of a byte-sequence interpreted
+    as the little-endian representation of an unsigned big integer.
+
+    :param inp: input bytes buffer
+    :type inp: bytes
+    :return: a byte-sequence representing, as a little-endian
+             unsigned big integer, the value ``to_int(inp)``
+             incremented by one.
+    :rtype: bytes
+
+    """
+    ensure(isinstance(inp, bytes), raising=exc.TypeError)
+
+    # Work on a mutable C copy; the Python input is never modified.
+    ln = len(inp)
+    buf = ffi.new("unsigned char []", ln)
+
+    ffi.memmove(buf, inp, ln)
+
+    lib.sodium_increment(buf, ln)
+
+    return ffi.buffer(buf, ln)[:]
+
+
+def sodium_add(a: bytes, b: bytes) -> bytes:
+    """
+    Given a couple of *same-sized* byte sequences, interpreted as the
+    little-endian representation of two unsigned integers, compute
+    the modular addition of the represented values, in constant time for
+    a given common length of the byte sequences.
+
+    :param a: input bytes buffer
+    :type a: bytes
+    :param b: input bytes buffer
+    :type b: bytes
+    :return: a byte-sequence representing, as a little-endian big integer,
+             the integer value of ``(to_int(a) + to_int(b)) mod 2^(8*len(a))``
+    :rtype: bytes
+    :raises nacl.exceptions.TypeError: if the inputs differ in length
+    """
+    ensure(isinstance(a, bytes), raising=exc.TypeError)
+    ensure(isinstance(b, bytes), raising=exc.TypeError)
+    ln = len(a)
+    ensure(len(b) == ln, raising=exc.TypeError)
+
+    # libsodium adds in place into the first operand, so copy ``a`` into a
+    # mutable buffer and leave the Python inputs untouched.
+    buf_a = ffi.new("unsigned char []", ln)
+    buf_b = ffi.new("unsigned char []", ln)
+
+    ffi.memmove(buf_a, a, ln)
+    ffi.memmove(buf_b, b, ln)
+
+    lib.sodium_add(buf_a, buf_b, ln)
+
+    return ffi.buffer(buf_a, ln)[:]
diff --git a/lib/nacl/encoding.py b/lib/nacl/encoding.py
new file mode 100644
index 0000000..6740cfb
--- /dev/null
+++ b/lib/nacl/encoding.py
@@ -0,0 +1,105 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import binascii
+from abc import ABCMeta, abstractmethod
+from typing import SupportsBytes, Type
+
+
+# TODO: when the minimum supported version of Python is 3.8, we can import
+# Protocol from typing, and replace Encoder with a Protocol instead.
+class _Encoder(metaclass=ABCMeta):
+    """Abstract bytes-to-bytes codec interface.
+
+    Both methods are static, so APIs taking an ``encoder`` argument are
+    handed the class itself rather than an instance.
+    """
+
+    @staticmethod
+    @abstractmethod
+    def encode(data: bytes) -> bytes:
+        """Transform raw data to encoded data."""
+
+    @staticmethod
+    @abstractmethod
+    def decode(data: bytes) -> bytes:
+        """Transform encoded data back to raw data.
+
+        Decoding after encoding should be a no-op, i.e. `decode(encode(x)) == x`.
+        """
+
+
+# Functions that use encoders are passed a subclass of _Encoder, not an
+# instance (because the methods are all static). Gloss over that detail by
+# aliasing Type[_Encoder] so signatures can simply say ``encoder: Encoder``.
+Encoder = Type[_Encoder]
+
+
+class RawEncoder(_Encoder):
+    """Identity codec: passes data through unchanged in both directions."""
+
+    @staticmethod
+    def encode(data: bytes) -> bytes:
+        return data
+
+    @staticmethod
+    def decode(data: bytes) -> bytes:
+        return data
+
+
+class HexEncoder(_Encoder):
+    """Lowercase hexadecimal codec (via :mod:`binascii`)."""
+
+    @staticmethod
+    def encode(data: bytes) -> bytes:
+        return binascii.hexlify(data)
+
+    @staticmethod
+    def decode(data: bytes) -> bytes:
+        return binascii.unhexlify(data)
+
+
+class Base16Encoder(_Encoder):
+    """RFC 4648 Base16 (uppercase hex) codec."""
+
+    @staticmethod
+    def encode(data: bytes) -> bytes:
+        return base64.b16encode(data)
+
+    @staticmethod
+    def decode(data: bytes) -> bytes:
+        return base64.b16decode(data)
+
+
+class Base32Encoder(_Encoder):
+    """RFC 4648 Base32 codec."""
+
+    @staticmethod
+    def encode(data: bytes) -> bytes:
+        return base64.b32encode(data)
+
+    @staticmethod
+    def decode(data: bytes) -> bytes:
+        return base64.b32decode(data)
+
+
+class Base64Encoder(_Encoder):
+    """Standard Base64 codec (``+``/``/`` alphabet)."""
+
+    @staticmethod
+    def encode(data: bytes) -> bytes:
+        return base64.b64encode(data)
+
+    @staticmethod
+    def decode(data: bytes) -> bytes:
+        return base64.b64decode(data)
+
+
+class URLSafeBase64Encoder(_Encoder):
+    """Base64 codec with the URL- and filesystem-safe ``-``/``_`` alphabet."""
+
+    @staticmethod
+    def encode(data: bytes) -> bytes:
+        return base64.urlsafe_b64encode(data)
+
+    @staticmethod
+    def decode(data: bytes) -> bytes:
+        return base64.urlsafe_b64decode(data)
+
+
+class Encodable:
+    """Mixin adding ``encode(encoder=...)`` to any type defining ``__bytes__``."""
+
+    def encode(self: SupportsBytes, encoder: Encoder = RawEncoder) -> bytes:
+        return encoder.encode(bytes(self))
diff --git a/lib/nacl/exceptions.py b/lib/nacl/exceptions.py
new file mode 100644
index 0000000..40b1635
--- /dev/null
+++ b/lib/nacl/exceptions.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# We create a clone of various builtin Exception types which additionally
+# inherit from CryptoError. Below, we refer to the parent types via the
+# `builtins` namespace, so mypy can distinguish between (e.g.)
+# `nacl.exceptions.RuntimeError` and `builtins.RuntimeError`.
+import builtins
+from typing import Type
+
+
+class CryptoError(Exception):
+    """
+    Base exception for all nacl related errors
+
+    Catching this type handles every error this package raises.
+    """
+
+
+class BadSignatureError(CryptoError):
+    """
+    Raised when the signature was forged or otherwise corrupt.
+    """
+
+
+class RuntimeError(builtins.RuntimeError, CryptoError):
+    """``RuntimeError`` that is also catchable as :class:`CryptoError`."""
+
+    pass
+
+
+class AssertionError(builtins.AssertionError, CryptoError):
+    """``AssertionError`` that is also catchable as :class:`CryptoError`."""
+
+    pass
+
+
+class TypeError(builtins.TypeError, CryptoError):
+    """``TypeError`` that is also catchable as :class:`CryptoError`."""
+
+    pass
+
+
+class ValueError(builtins.ValueError, CryptoError):
+    """``ValueError`` that is also catchable as :class:`CryptoError`."""
+
+    pass
+
+
+class InvalidkeyError(CryptoError):
+    """Raised when a key is rejected as invalid."""
+
+    pass
+
+
+class CryptPrefixError(InvalidkeyError):
+    """Raised for a modular-crypt string with an unrecognized prefix."""
+
+    pass
+
+
+class UnavailableError(RuntimeError):
+    """
+    A subclass of :class:`~nacl.exceptions.RuntimeError`, raised when
+    trying to call functions not available in a minimal build of
+    libsodium or due to hardware limitations.
+    """
+
+    pass
+
+
+def ensure(cond: bool, *args: object, **kwds: Type[Exception]) -> None:
+    """
+    Return if a condition is true, otherwise raise a caller-configurable
+    :py:class:`Exception`
+    :param bool cond: the condition to be checked
+    :param sequence args: the arguments to be passed to the exception's
+                          constructor
+    The only accepted named parameter is `raising` used to configure the
+    exception to be raised if `cond` is not `True`
+    """
+    _CHK_UNEXP = "check_condition() got an unexpected keyword argument {0}"
+
+    raising = kwds.pop("raising", AssertionError)
+    # Any keyword other than ``raising`` is a programming error.
+    if kwds:
+        raise TypeError(_CHK_UNEXP.format(repr(kwds.popitem()[0])))
+
+    # Deliberately an identity check: only the literal ``True`` passes;
+    # merely truthy values (1, non-empty strings, ...) raise.
+    if cond is True:
+        return
+    raise raising(*args)
diff --git a/lib/nacl/hash.py b/lib/nacl/hash.py
new file mode 100644
index 0000000..9f81590
--- /dev/null
+++ b/lib/nacl/hash.py
@@ -0,0 +1,181 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+The :mod:`nacl.hash` module exposes one-shot interfaces
+for libsodium selected hash primitives and the constants needed
+for their usage.
+"""
+
+import nacl.bindings
+import nacl.encoding
+
+
+BLAKE2B_BYTES = nacl.bindings.crypto_generichash_BYTES
+"""Default digest size for :func:`blake2b` hash"""
+BLAKE2B_BYTES_MIN = nacl.bindings.crypto_generichash_BYTES_MIN
+"""Minimum allowed digest size for :func:`blake2b` hash"""
+BLAKE2B_BYTES_MAX = nacl.bindings.crypto_generichash_BYTES_MAX
+"""Maximum allowed digest size for :func:`blake2b` hash"""
+BLAKE2B_KEYBYTES = nacl.bindings.crypto_generichash_KEYBYTES
+"""Default size of the ``key`` byte array for :func:`blake2b` hash"""
+BLAKE2B_KEYBYTES_MIN = nacl.bindings.crypto_generichash_KEYBYTES_MIN
+"""Minimum allowed size of the ``key`` byte array for :func:`blake2b` hash"""
+BLAKE2B_KEYBYTES_MAX = nacl.bindings.crypto_generichash_KEYBYTES_MAX
+"""Maximum allowed size of the ``key`` byte array for :func:`blake2b` hash"""
+BLAKE2B_SALTBYTES = nacl.bindings.crypto_generichash_SALTBYTES
+"""Maximum allowed length of the ``salt`` byte array for
+:func:`blake2b` hash"""
+BLAKE2B_PERSONALBYTES = nacl.bindings.crypto_generichash_PERSONALBYTES
+"""Maximum allowed length of the ``personalization``
+byte array for :func:`blake2b` hash"""
+
+SIPHASH_BYTES = nacl.bindings.crypto_shorthash_siphash24_BYTES
+"""Size of the :func:`siphash24` digest"""
+SIPHASH_KEYBYTES = nacl.bindings.crypto_shorthash_siphash24_KEYBYTES
+"""Size of the secret ``key`` used by the :func:`siphash24` MAC"""
+
+SIPHASHX_AVAILABLE = nacl.bindings.has_crypto_shorthash_siphashx24
+"""``True`` if :func:`siphashx24` is available to be called"""
+
+SIPHASHX_BYTES = nacl.bindings.crypto_shorthash_siphashx24_BYTES
+"""Size of the :func:`siphashx24` digest"""
+SIPHASHX_KEYBYTES = nacl.bindings.crypto_shorthash_siphashx24_KEYBYTES
+"""Size of the secret ``key`` used by the :func:`siphashx24` MAC"""
+
+# Short private aliases for the underlying one-shot binding functions,
+# used by the wrappers below.
+_b2b_hash = nacl.bindings.crypto_generichash_blake2b_salt_personal
+_sip_hash = nacl.bindings.crypto_shorthash_siphash24
+_sip_hashx = nacl.bindings.crypto_shorthash_siphashx24
+
+
+def sha256(
+    message: bytes, encoder: nacl.encoding.Encoder = nacl.encoding.HexEncoder
+) -> bytes:
+    """
+    Hashes ``message`` with SHA256.
+
+    :param message: The message to hash.
+    :type message: bytes
+    :param encoder: A class that is able to encode the hashed message;
+        the default returns the digest hex-encoded.
+    :returns: The hashed message.
+    :rtype: bytes
+    """
+    return encoder.encode(nacl.bindings.crypto_hash_sha256(message))
+
+
+def sha512(
+    message: bytes, encoder: nacl.encoding.Encoder = nacl.encoding.HexEncoder
+) -> bytes:
+    """
+    Hashes ``message`` with SHA512.
+
+    :param message: The message to hash.
+    :type message: bytes
+    :param encoder: A class that is able to encode the hashed message;
+        the default returns the digest hex-encoded.
+    :returns: The hashed message.
+    :rtype: bytes
+    """
+    return encoder.encode(nacl.bindings.crypto_hash_sha512(message))
+
+
+def blake2b(
+    data: bytes,
+    digest_size: int = BLAKE2B_BYTES,
+    key: bytes = b"",
+    salt: bytes = b"",
+    person: bytes = b"",
+    encoder: nacl.encoding.Encoder = nacl.encoding.HexEncoder,
+) -> bytes:
+    """
+    Hashes ``data`` with blake2b.
+
+    :param data: the digest input byte sequence
+    :type data: bytes
+    :param digest_size: the requested digest size; must be at most
+                        :const:`BLAKE2B_BYTES_MAX`;
+                        the default digest size is
+                        :const:`BLAKE2B_BYTES`
+    :type digest_size: int
+    :param key: the key to be set for keyed MAC/PRF usage; if set, the key
+                must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
+    :type key: bytes
+    :param salt: an initialization salt at most
+                 :const:`BLAKE2B_SALTBYTES` long;
+                 it will be zero-padded if needed
+    :type salt: bytes
+    :param person: a personalization string at most
+                   :const:`BLAKE2B_PERSONALBYTES` long;
+                   it will be zero-padded if needed
+    :type person: bytes
+    :param encoder: the encoder to use on returned digest
+    :type encoder: class
+    :returns: The hashed message.
+    :rtype: bytes
+    """
+
+    # Delegate to the salt/personal-aware one-shot binding.
+    digest = _b2b_hash(
+        data, digest_size=digest_size, key=key, salt=salt, person=person
+    )
+    return encoder.encode(digest)
+
+
+# Alias: libsodium's "generichash" construction is blake2b.
+generichash = blake2b
+
+
+def siphash24(
+    message: bytes,
+    key: bytes = b"",
+    encoder: nacl.encoding.Encoder = nacl.encoding.HexEncoder,
+) -> bytes:
+    """
+    Computes a keyed MAC of ``message`` using the short-input-optimized
+    siphash-2-4 construction.
+
+    :param message: The message to hash.
+    :type message: bytes
+    :param key: the message authentication key for the siphash MAC construct
+    :type key: bytes(:const:`SIPHASH_KEYBYTES`)
+    :param encoder: A class that is able to encode the hashed message.
+    :returns: The hashed message.
+    :rtype: bytes(:const:`SIPHASH_BYTES`)
+    """
+    digest = _sip_hash(message, key)
+    return encoder.encode(digest)
+
+
+# Alias: libsodium's default "shorthash" construction is siphash-2-4.
+shorthash = siphash24
+
+
+def siphashx24(
+    message: bytes,
+    key: bytes = b"",
+    encoder: nacl.encoding.Encoder = nacl.encoding.HexEncoder,
+) -> bytes:
+    """
+    Computes a keyed MAC of ``message`` using the 128 bit variant of the
+    siphash-2-4 construction.
+
+    :param message: The message to hash.
+    :type message: bytes
+    :param key: the message authentication key for the siphash MAC construct
+    :type key: bytes(:const:`SIPHASHX_KEYBYTES`)
+    :param encoder: A class that is able to encode the hashed message.
+    :returns: The hashed message.
+    :rtype: bytes(:const:`SIPHASHX_BYTES`)
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+
+    .. versionadded:: 1.2
+    """
+    digest = _sip_hashx(message, key)
+    return encoder.encode(digest)
diff --git a/lib/nacl/hashlib.py b/lib/nacl/hashlib.py
new file mode 100644
index 0000000..9d289da
--- /dev/null
+++ b/lib/nacl/hashlib.py
@@ -0,0 +1,143 @@
+# Copyright 2016-2019 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import binascii
+from typing import NoReturn
+
+import nacl.bindings
+from nacl.utils import bytes_as_string
+
+# Digest / key / salt / personalization size limits, re-exported from the
+# generichash (blake2b) bindings.
+BYTES = nacl.bindings.crypto_generichash_BYTES
+BYTES_MIN = nacl.bindings.crypto_generichash_BYTES_MIN
+BYTES_MAX = nacl.bindings.crypto_generichash_BYTES_MAX
+KEYBYTES = nacl.bindings.crypto_generichash_KEYBYTES
+KEYBYTES_MIN = nacl.bindings.crypto_generichash_KEYBYTES_MIN
+KEYBYTES_MAX = nacl.bindings.crypto_generichash_KEYBYTES_MAX
+SALTBYTES = nacl.bindings.crypto_generichash_SALTBYTES
+PERSONALBYTES = nacl.bindings.crypto_generichash_PERSONALBYTES
+
+SCRYPT_AVAILABLE = nacl.bindings.has_crypto_pwhash_scryptsalsa208sha256
+
+# Short private aliases for the streaming blake2b bindings used below.
+_b2b_init = nacl.bindings.crypto_generichash_blake2b_init
+_b2b_final = nacl.bindings.crypto_generichash_blake2b_final
+_b2b_update = nacl.bindings.crypto_generichash_blake2b_update
+
+
+class blake2b:
+    """
+    :py:mod:`hashlib` API compatible blake2b algorithm implementation
+    """
+
+    MAX_DIGEST_SIZE = BYTES
+    MAX_KEY_SIZE = KEYBYTES_MAX
+    PERSON_SIZE = PERSONALBYTES
+    SALT_SIZE = SALTBYTES
+
+    def __init__(
+        self,
+        data: bytes = b"",
+        digest_size: int = BYTES,
+        key: bytes = b"",
+        salt: bytes = b"",
+        person: bytes = b"",
+    ):
+        """
+        :py:class:`.blake2b` algorithm initializer
+
+        :param data: initial chunk of data to hash, if any
+        :type data: bytes
+        :param int digest_size: the requested digest size; must be
+                                at most :py:attr:`.MAX_DIGEST_SIZE`;
+                                the default digest size is :py:data:`.BYTES`
+        :param key: the key to be set for keyed MAC/PRF usage; if set,
+                    the key must be at most :py:data:`.KEYBYTES_MAX` long
+        :type key: bytes
+        :param salt: an initialization salt at most
+                     :py:attr:`.SALT_SIZE` long; it will be zero-padded
+                     if needed
+        :type salt: bytes
+        :param person: a personalization string at most
+                       :py:attr:`.PERSON_SIZE` long; it will be zero-padded
+                       if needed
+        :type person: bytes
+        """
+
+        self._state = _b2b_init(
+            key=key, salt=salt, person=person, digest_size=digest_size
+        )
+        self._digest_size = digest_size
+
+        if data:
+            self.update(data)
+
+    @property
+    def digest_size(self) -> int:
+        return self._digest_size
+
+    @property
+    def block_size(self) -> int:
+        # blake2b's fixed internal block size, per the hashlib interface.
+        return 128
+
+    @property
+    def name(self) -> str:
+        return "blake2b"
+
+    def update(self, data: bytes) -> None:
+        _b2b_update(self._state, data)
+
+    def digest(self) -> bytes:
+        # Finalize a *copy* of the state, so digest() is non-destructive
+        # and update() may continue afterwards (hashlib semantics).
+        _st = self._state.copy()
+        return _b2b_final(_st)
+
+    def hexdigest(self) -> str:
+        return bytes_as_string(binascii.hexlify(self.digest()))
+
+    def copy(self) -> "blake2b":
+        # Create a sibling object, then overwrite its freshly-initialized
+        # state with a copy of ours (key/salt/person live in the state).
+        _cp = type(self)(digest_size=self.digest_size)
+        _st = self._state.copy()
+        _cp._state = _st
+        return _cp
+
+    def __reduce__(self) -> NoReturn:
+        """
+        Raise the same exception as hashlib's blake implementation
+        on copy.copy()
+        """
+        raise TypeError(
+            "can't pickle {} objects".format(self.__class__.__name__)
+        )
+
+
+def scrypt(
+    password: bytes,
+    salt: bytes = b"",
+    n: int = 2**20,
+    r: int = 8,
+    p: int = 1,
+    maxmem: int = 2**25,
+    dklen: int = 64,
+) -> bytes:
+    """
+    Derive a cryptographic key using the scrypt KDF.
+
+    :param password: secret input to the KDF
+    :param salt: salt bytes
+    :param n: CPU/memory cost factor
+    :param r: block size factor
+    :param p: parallelism factor
+    :param maxmem: memory usage cap in bytes
+    :param dklen: length of the derived key in bytes
+    :raises nacl.exceptions.UnavailableError: If called when using a
+        minimal build of libsodium.
+
+    Implements the same signature as the ``hashlib.scrypt`` implemented
+    in cpython version 3.6
+    """
+    return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_ll(
+        password, salt, n, r, p, maxmem=maxmem, dklen=dklen
+    )
diff --git a/lib/nacl/public.py b/lib/nacl/public.py
new file mode 100644
index 0000000..a6fc958
--- /dev/null
+++ b/lib/nacl/public.py
@@ -0,0 +1,421 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import ClassVar, Generic, Optional, Type, TypeVar
+
+import nacl.bindings
+from nacl import encoding
+from nacl import exceptions as exc
+from nacl.encoding import Encoder
+from nacl.utils import EncryptedMessage, StringFixer, random
+
+
+class PublicKey(encoding.Encodable, StringFixer):
+    """
+    The public key counterpart to a Curve25519 :class:`nacl.public.PrivateKey`
+    for encrypting messages.
+
+    :param public_key: [:class:`bytes`] Encoded Curve25519 public key
+    :param encoder: A class that is able to decode the `public_key`
+
+    :cvar SIZE: The size that the public key is required to be
+    """
+
+    SIZE: ClassVar[int] = nacl.bindings.crypto_box_PUBLICKEYBYTES
+
+    def __init__(
+        self,
+        public_key: bytes,
+        encoder: encoding.Encoder = encoding.RawEncoder,
+    ):
+        self._public_key = encoder.decode(public_key)
+        if not isinstance(self._public_key, bytes):
+            raise exc.TypeError("PublicKey must be created from 32 bytes")
+
+        if len(self._public_key) != self.SIZE:
+            raise exc.ValueError(
+                "The public key must be exactly {} bytes long".format(
+                    self.SIZE
+                )
+            )
+
+    def __bytes__(self) -> bytes:
+        return self._public_key
+
+    def __hash__(self) -> int:
+        return hash(bytes(self))
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, self.__class__):
+            return False
+        # Constant-time comparison via libsodium, rather than ``==`` on the
+        # raw bytes.
+        return nacl.bindings.sodium_memcmp(bytes(self), bytes(other))
+
+    def __ne__(self, other: object) -> bool:
+        return not (self == other)
+
+
+class PrivateKey(encoding.Encodable, StringFixer):
+    """
+    Private key for decrypting messages using the Curve25519 algorithm.
+
+    .. warning:: This **must** be protected and remain secret. Anyone who
+        knows the value of your :class:`~nacl.public.PrivateKey` can decrypt
+        any message encrypted by the corresponding
+        :class:`~nacl.public.PublicKey`
+
+    :param private_key: The private key used to decrypt messages
+    :param encoder: The encoder class used to decode the given keys
+
+    :cvar SIZE: The size that the private key is required to be
+    :cvar SEED_SIZE: The size that the seed used to generate the
+                     private key is required to be
+    """
+
+    SIZE: ClassVar[int] = nacl.bindings.crypto_box_SECRETKEYBYTES
+    SEED_SIZE: ClassVar[int] = nacl.bindings.crypto_box_SEEDBYTES
+
+    def __init__(
+        self,
+        private_key: bytes,
+        encoder: encoding.Encoder = encoding.RawEncoder,
+    ):
+        # Decode the secret_key
+        private_key = encoder.decode(private_key)
+        # verify the given secret key type and size are correct
+        if not (
+            isinstance(private_key, bytes) and len(private_key) == self.SIZE
+        ):
+            raise exc.TypeError(
+                (
+                    "PrivateKey must be created from a {} bytes long raw secret key"
+                ).format(self.SIZE)
+            )
+
+        # Derive the matching public key via scalar multiplication of the
+        # Curve25519 base point.
+        raw_public_key = nacl.bindings.crypto_scalarmult_base(private_key)
+
+        self._private_key = private_key
+        self.public_key = PublicKey(raw_public_key)
+
+    @classmethod
+    def from_seed(
+        cls,
+        seed: bytes,
+        encoder: encoding.Encoder = encoding.RawEncoder,
+    ) -> "PrivateKey":
+        """
+        Generate a PrivateKey using a deterministic construction
+        starting from a caller-provided seed
+
+        .. warning:: The seed **must** be high-entropy; therefore,
+            its generator **must** be a cryptographic quality
+            random function like, for example, :func:`~nacl.utils.random`.
+
+        .. warning:: The seed **must** be protected and remain secret.
+            Anyone who knows the seed is really in possession of
+            the corresponding PrivateKey.
+
+        :param seed: The seed used to generate the private key
+        :rtype: :class:`~nacl.public.PrivateKey`
+        """
+        # decode the seed
+        seed = encoder.decode(seed)
+        # Verify the given seed type and size are correct
+        if not (isinstance(seed, bytes) and len(seed) == cls.SEED_SIZE):
+            raise exc.TypeError(
+                (
+                    "PrivateKey seed must be a {} bytes long binary sequence"
+                ).format(cls.SEED_SIZE)
+            )
+        # generate a raw key pair from the given seed
+        raw_pk, raw_sk = nacl.bindings.crypto_box_seed_keypair(seed)
+        # construct a instance from the raw secret key
+        return cls(raw_sk)
+
+    def __bytes__(self) -> bytes:
+        return self._private_key
+
+    def __hash__(self) -> int:
+        # Hash/equality are defined through the derived public key, so the
+        # secret bytes are never fed to non-constant-time machinery.
+        return hash((type(self), bytes(self.public_key)))
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, self.__class__):
+            return False
+        return self.public_key == other.public_key
+
+    def __ne__(self, other: object) -> bool:
+        return not (self == other)
+
+    @classmethod
+    def generate(cls) -> "PrivateKey":
+        """
+        Generates a random :class:`~nacl.public.PrivateKey` object
+
+        :rtype: :class:`~nacl.public.PrivateKey`
+        """
+        return cls(random(PrivateKey.SIZE), encoder=encoding.RawEncoder)
+
+
+# TypeVar so Box.decode() returns the subclass it is called on.
+_Box = TypeVar("_Box", bound="Box")
+
+
+class Box(encoding.Encodable, StringFixer):
+    """
+    The Box class boxes and unboxes messages between a pair of keys
+
+    The ciphertexts generated by :class:`~nacl.public.Box` include a 16
+    byte authenticator which is checked as part of the decryption. An invalid
+    authenticator will cause the decrypt function to raise an exception. The
+    authenticator is not a signature. Once you've decrypted the message you've
+    demonstrated the ability to create arbitrary valid messages, so messages
+    you send are repudiable. For non-repudiable messages, sign them after
+    encryption.
+
+    :param private_key: :class:`~nacl.public.PrivateKey` used to encrypt and
+        decrypt messages
+    :param public_key: :class:`~nacl.public.PublicKey` used to encrypt and
+        decrypt messages
+
+    :cvar NONCE_SIZE: The size that the nonce is required to be.
+    """
+
+    NONCE_SIZE: ClassVar[int] = nacl.bindings.crypto_box_NONCEBYTES
+    _shared_key: bytes
+
+    def __init__(self, private_key: PrivateKey, public_key: PublicKey):
+        if not isinstance(private_key, PrivateKey) or not isinstance(
+            public_key, PublicKey
+        ):
+            raise exc.TypeError(
+                "Box must be created from a PrivateKey and a PublicKey"
+            )
+        # Precompute the shared key once, so encrypt/decrypt can use the
+        # cheaper *_afternm binding variants.
+        self._shared_key = nacl.bindings.crypto_box_beforenm(
+            public_key.encode(encoder=encoding.RawEncoder),
+            private_key.encode(encoder=encoding.RawEncoder),
+        )
+
+    def __bytes__(self) -> bytes:
+        return self._shared_key
+
+    @classmethod
+    def decode(
+        cls: Type[_Box], encoded: bytes, encoder: Encoder = encoding.RawEncoder
+    ) -> _Box:
+        """
+        Alternative constructor. Creates a Box from an existing Box's shared key.
+        """
+        # Create an empty box (skips __init__, which requires a key pair)
+        box: _Box = cls.__new__(cls)
+
+        # Assign our decoded value to the shared key of the box
+        box._shared_key = encoder.decode(encoded)
+
+        return box
+
+    def encrypt(
+        self,
+        plaintext: bytes,
+        nonce: Optional[bytes] = None,
+        encoder: encoding.Encoder = encoding.RawEncoder,
+    ) -> EncryptedMessage:
+        """
+        Encrypts the plaintext message using the given `nonce` (or generates
+        one randomly if omitted) and returns the ciphertext encoded with the
+        encoder.
+
+        .. warning:: It is **VITALLY** important that the nonce is a nonce,
+            i.e. it is a number used only once for any given key. If you fail
+            to do this, you compromise the privacy of the messages encrypted.
+
+        :param plaintext: [:class:`bytes`] The plaintext message to encrypt
+        :param nonce: [:class:`bytes`] The nonce to use in the encryption
+        :param encoder: The encoder to use to encode the ciphertext
+        :rtype: [:class:`nacl.utils.EncryptedMessage`]
+        """
+        if nonce is None:
+            nonce = random(self.NONCE_SIZE)
+
+        if len(nonce) != self.NONCE_SIZE:
+            raise exc.ValueError(
+                "The nonce must be exactly %s bytes long" % self.NONCE_SIZE
+            )
+
+        ciphertext = nacl.bindings.crypto_box_easy_afternm(
+            plaintext,
+            nonce,
+            self._shared_key,
+        )
+
+        encoded_nonce = encoder.encode(nonce)
+        encoded_ciphertext = encoder.encode(ciphertext)
+
+        # EncryptedMessage carries the nonce, ciphertext, and combined
+        # form so callers can pick whichever representation they need.
+        return EncryptedMessage._from_parts(
+            encoded_nonce,
+            encoded_ciphertext,
+            encoder.encode(nonce + ciphertext),
+        )
+
+    def decrypt(
+        self,
+        ciphertext: bytes,
+        nonce: Optional[bytes] = None,
+        encoder: encoding.Encoder = encoding.RawEncoder,
+    ) -> bytes:
+        """
+        Decrypts the ciphertext using the `nonce` (explicitly, when passed as a
+        parameter or implicitly, when omitted, as part of the ciphertext) and
+        returns the plaintext message.
+
+        :param ciphertext: [:class:`bytes`] The encrypted message to decrypt
+        :param nonce: [:class:`bytes`] The nonce used when encrypting the
+            ciphertext
+        :param encoder: The encoder used to decode the ciphertext.
+        :rtype: [:class:`bytes`]
+        """
+        # Decode our ciphertext
+        ciphertext = encoder.decode(ciphertext)
+
+        if nonce is None:
+            # If we were given the nonce and ciphertext combined, split them.
+            nonce = ciphertext[: self.NONCE_SIZE]
+            ciphertext = ciphertext[self.NONCE_SIZE :]
+
+        if len(nonce) != self.NONCE_SIZE:
+            raise exc.ValueError(
+                "The nonce must be exactly %s bytes long" % self.NONCE_SIZE
+            )
+
+        plaintext = nacl.bindings.crypto_box_open_easy_afternm(
+            ciphertext,
+            nonce,
+            self._shared_key,
+        )
+
+        return plaintext
+
+    def shared_key(self) -> bytes:
+        """
+        Returns the Curve25519 shared secret, that can then be used as a key in
+        other symmetric ciphers.
+
+        .. warning:: It is **VITALLY** important that you use a nonce with your
+            symmetric cipher. If you fail to do this, you compromise the
+            privacy of the messages encrypted. Ensure that the key length of
+            your cipher is 32 bytes.
+        :rtype: [:class:`bytes`]
+        """
+
+        return self._shared_key
+
+
+# Constrained TypeVar: a SealedBox is parameterized by either key half.
+_Key = TypeVar("_Key", PublicKey, PrivateKey)
+
+
+class SealedBox(Generic[_Key], encoding.Encodable, StringFixer):
+    """
+    The SealedBox class boxes and unboxes messages addressed to
+    a specified key-pair by using ephemeral sender's key pairs,
+    whose private part will be discarded just after encrypting
+    a single plaintext message.
+
+    The ciphertexts generated by :class:`~nacl.public.SealedBox` include
+    the public part of the ephemeral key before the :class:`~nacl.public.Box`
+    ciphertext.
+
+    :param recipient_key: a :class:`~nacl.public.PublicKey` used to encrypt
+        messages and derive nonces, or a :class:`~nacl.public.PrivateKey` used
+        to decrypt messages.
+
+    .. versionadded:: 1.2
+    """
+
+    _public_key: bytes
+    _private_key: Optional[bytes]
+
+    def __init__(self, recipient_key: _Key):
+        # With only a public key this box can encrypt but not decrypt;
+        # a private key enables both (the public half is derived from it).
+        if isinstance(recipient_key, PublicKey):
+            self._public_key = recipient_key.encode(
+                encoder=encoding.RawEncoder
+            )
+            self._private_key = None
+        elif isinstance(recipient_key, PrivateKey):
+            self._private_key = recipient_key.encode(
+                encoder=encoding.RawEncoder
+            )
+            self._public_key = recipient_key.public_key.encode(
+                encoder=encoding.RawEncoder
+            )
+        else:
+            raise exc.TypeError(
+                "SealedBox must be created from a PublicKey or a PrivateKey"
+            )
+
+    def __bytes__(self) -> bytes:
+        return self._public_key
+
+    def encrypt(
+        self,
+        plaintext: bytes,
+        encoder: encoding.Encoder = encoding.RawEncoder,
+    ) -> bytes:
+        """
+        Encrypts the plaintext message using a random-generated ephemeral
+        key pair and returns a "composed ciphertext", containing both
+        the public part of the key pair and the ciphertext proper,
+        encoded with the encoder.
+
+        The private part of the ephemeral key-pair will be scrubbed before
+        returning the ciphertext, therefore, the sender will not be able to
+        decrypt the generated ciphertext.
+
+        :param plaintext: [:class:`bytes`] The plaintext message to encrypt
+        :param encoder: The encoder to use to encode the ciphertext
+        :return bytes: encoded ciphertext
+        """
+
+        ciphertext = nacl.bindings.crypto_box_seal(plaintext, self._public_key)
+
+        encoded_ciphertext = encoder.encode(ciphertext)
+
+        return encoded_ciphertext
+
+    def decrypt(
+        self: "SealedBox[PrivateKey]",
+        ciphertext: bytes,
+        encoder: encoding.Encoder = encoding.RawEncoder,
+    ) -> bytes:
+        """
+        Decrypts the ciphertext using the ephemeral public key enclosed
+        in the ciphertext and the SealedBox private key, returning
+        the plaintext message.
+
+        :param ciphertext: [:class:`bytes`] The encrypted message to decrypt
+        :param encoder: The encoder used to decode the ciphertext.
+        :return bytes: The original plaintext
+        :raises TypeError: if this SealedBox was created with a
+            :class:`~nacl.public.PublicKey` rather than a
+            :class:`~nacl.public.PrivateKey`.
+        """
+        # Decode our ciphertext
+        ciphertext = encoder.decode(ciphertext)
+
+        if self._private_key is None:
+            raise TypeError(
+                "SealedBoxes created with a public key cannot decrypt"
+            )
+        plaintext = nacl.bindings.crypto_box_seal_open(
+            ciphertext,
+            self._public_key,
+            self._private_key,
+        )
+
+        return plaintext
diff --git a/lib/nacl/pwhash/__init__.py b/lib/nacl/pwhash/__init__.py
new file mode 100644
index 0000000..ffd76a6
--- /dev/null
+++ b/lib/nacl/pwhash/__init__.py
@@ -0,0 +1,75 @@
+# Copyright 2017 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
from nacl.exceptions import CryptPrefixError

from . import _argon2, argon2i, argon2id, scrypt

# Package-level defaults re-export the argon2id variant, which is
# libsodium's default algorithm (see the assert below).
STRPREFIX = argon2id.STRPREFIX

PWHASH_SIZE = argon2id.PWHASH_SIZE

assert _argon2.ALG_ARGON2_DEFAULT == _argon2.ALG_ARGON2ID13
# since version 1.0.15 of libsodium

PASSWD_MIN = argon2id.PASSWD_MIN
PASSWD_MAX = argon2id.PASSWD_MAX
MEMLIMIT_MAX = argon2id.MEMLIMIT_MAX
MEMLIMIT_MIN = argon2id.MEMLIMIT_MIN
OPSLIMIT_MAX = argon2id.OPSLIMIT_MAX
OPSLIMIT_MIN = argon2id.OPSLIMIT_MIN
OPSLIMIT_INTERACTIVE = argon2id.OPSLIMIT_INTERACTIVE
MEMLIMIT_INTERACTIVE = argon2id.MEMLIMIT_INTERACTIVE
OPSLIMIT_MODERATE = argon2id.OPSLIMIT_MODERATE
MEMLIMIT_MODERATE = argon2id.MEMLIMIT_MODERATE
OPSLIMIT_SENSITIVE = argon2id.OPSLIMIT_SENSITIVE
MEMLIMIT_SENSITIVE = argon2id.MEMLIMIT_SENSITIVE

# NOTE: intentionally shadows the builtin ``str`` at module scope so that
# callers can write ``nacl.pwhash.str(...)``; use ``builtins.str`` inside
# this module if the builtin is ever needed.
str = argon2id.str

# The argon2i and argon2id algorithm identifiers must be distinct for
# prefix-based dispatch between the two variants to be meaningful.
assert argon2i.ALG != argon2id.ALG

# scrypt constants are re-exported under a SCRYPT_ prefix to avoid
# clashing with the argon2id-based defaults above.
SCRYPT_SALTBYTES = scrypt.SALTBYTES
SCRYPT_PWHASH_SIZE = scrypt.PWHASH_SIZE
SCRYPT_OPSLIMIT_INTERACTIVE = scrypt.OPSLIMIT_INTERACTIVE
SCRYPT_MEMLIMIT_INTERACTIVE = scrypt.MEMLIMIT_INTERACTIVE
SCRYPT_OPSLIMIT_SENSITIVE = scrypt.OPSLIMIT_SENSITIVE
SCRYPT_MEMLIMIT_SENSITIVE = scrypt.MEMLIMIT_SENSITIVE


# Convenience aliases for the scrypt entry points.
kdf_scryptsalsa208sha256 = scrypt.kdf
scryptsalsa208sha256_str = scrypt.str
verify_scryptsalsa208sha256 = scrypt.verify
+
+
def verify(password_hash: bytes, password: bytes) -> bool:
    """
    Takes a modular crypt encoded stored password hash derived using one
    of the algorithms supported by `libsodium` and checks if the user provided
    password will hash to the same string when using the parameters saved
    in the stored hash

    :param password_hash: stored hash, serialized in modular crypt() format
    :param password: user supplied candidate password
    :raises nacl.exceptions.CryptPrefixError: if ``password_hash`` matches
        none of the supported algorithm prefixes
    """
    if password_hash.startswith(argon2id.STRPREFIX):
        return argon2id.verify(password_hash, password)
    elif password_hash.startswith(argon2i.STRPREFIX):
        # Fix: dispatch argon2i hashes to argon2i.verify, not argon2id.verify.
        # (Both currently alias the same ``_argon2.verify`` helper, so the
        # behavior is unchanged, but the dispatch now matches the prefix.)
        return argon2i.verify(password_hash, password)
    elif scrypt.AVAILABLE and password_hash.startswith(scrypt.STRPREFIX):
        return scrypt.verify(password_hash, password)
    else:
        # Redundant parentheses around the raise expression removed.
        raise CryptPrefixError(
            "given password_hash is not in a supported format"
        )
diff --git a/lib/nacl/pwhash/__pycache__/__init__.cpython-314.pyc b/lib/nacl/pwhash/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..027c23f
Binary files /dev/null and b/lib/nacl/pwhash/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/nacl/pwhash/__pycache__/_argon2.cpython-314.pyc b/lib/nacl/pwhash/__pycache__/_argon2.cpython-314.pyc
new file mode 100644
index 0000000..e6ccc92
Binary files /dev/null and b/lib/nacl/pwhash/__pycache__/_argon2.cpython-314.pyc differ
diff --git a/lib/nacl/pwhash/__pycache__/argon2i.cpython-314.pyc b/lib/nacl/pwhash/__pycache__/argon2i.cpython-314.pyc
new file mode 100644
index 0000000..f7ac47e
Binary files /dev/null and b/lib/nacl/pwhash/__pycache__/argon2i.cpython-314.pyc differ
diff --git a/lib/nacl/pwhash/__pycache__/argon2id.cpython-314.pyc b/lib/nacl/pwhash/__pycache__/argon2id.cpython-314.pyc
new file mode 100644
index 0000000..f554fc3
Binary files /dev/null and b/lib/nacl/pwhash/__pycache__/argon2id.cpython-314.pyc differ
diff --git a/lib/nacl/pwhash/__pycache__/scrypt.cpython-314.pyc b/lib/nacl/pwhash/__pycache__/scrypt.cpython-314.pyc
new file mode 100644
index 0000000..253b253
Binary files /dev/null and b/lib/nacl/pwhash/__pycache__/scrypt.cpython-314.pyc differ
diff --git a/lib/nacl/pwhash/_argon2.py b/lib/nacl/pwhash/_argon2.py
new file mode 100644
index 0000000..856eda0
--- /dev/null
+++ b/lib/nacl/pwhash/_argon2.py
@@ -0,0 +1,49 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import nacl.bindings

# libsodium's STRBYTES counts the trailing C NUL terminator; subtract one
# to get the length of the actual modular-crypt string.
_argon2_strbytes_plus_one = nacl.bindings.crypto_pwhash_STRBYTES

# Fix: PWHASH_SIZE was assigned twice with the identical value; the
# duplicate assignment has been removed.
PWHASH_SIZE = _argon2_strbytes_plus_one - 1
SALTBYTES = nacl.bindings.crypto_pwhash_SALTBYTES

PASSWD_MIN = nacl.bindings.crypto_pwhash_PASSWD_MIN
PASSWD_MAX = nacl.bindings.crypto_pwhash_PASSWD_MAX

BYTES_MAX = nacl.bindings.crypto_pwhash_BYTES_MAX
BYTES_MIN = nacl.bindings.crypto_pwhash_BYTES_MIN

# Algorithm identifiers shared by the argon2i/argon2id submodules.
ALG_ARGON2I13 = nacl.bindings.crypto_pwhash_ALG_ARGON2I13
ALG_ARGON2ID13 = nacl.bindings.crypto_pwhash_ALG_ARGON2ID13
ALG_ARGON2_DEFAULT = nacl.bindings.crypto_pwhash_ALG_DEFAULT
+
+
def verify(password_hash: bytes, password: bytes) -> bool:
    """
    Check ``password`` against an argon2i/argon2id modular crypt hash.

    All hashing parameters (salt, opslimit, memlimit and algorithm
    variant) are read back out of ``password_hash`` itself.

    :param password_hash: password hash serialized in modular crypt() format
    :type password_hash: bytes
    :param password: user provided password
    :type password: bytes
    :rtype: boolean

    .. versionadded:: 1.2
    """
    # Delegate directly to libsodium's constant-time verifier.
    return nacl.bindings.crypto_pwhash_str_verify(password_hash, password)
diff --git a/lib/nacl/pwhash/argon2i.py b/lib/nacl/pwhash/argon2i.py
new file mode 100644
index 0000000..f9b3af7
--- /dev/null
+++ b/lib/nacl/pwhash/argon2i.py
@@ -0,0 +1,132 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import nacl.bindings
import nacl.encoding

from . import _argon2

# Algorithm identifier and modular-crypt string prefix for argon2i.
ALG = _argon2.ALG_ARGON2I13
STRPREFIX = nacl.bindings.crypto_pwhash_argon2i_STRPREFIX

SALTBYTES = _argon2.SALTBYTES

PASSWD_MIN = _argon2.PASSWD_MIN
PASSWD_MAX = _argon2.PASSWD_MAX

PWHASH_SIZE = _argon2.PWHASH_SIZE

BYTES_MIN = _argon2.BYTES_MIN
BYTES_MAX = _argon2.BYTES_MAX

# argon2i and argon2id share a single modular-crypt verifier.
verify = _argon2.verify

MEMLIMIT_MAX = nacl.bindings.crypto_pwhash_argon2i_MEMLIMIT_MAX
MEMLIMIT_MIN = nacl.bindings.crypto_pwhash_argon2i_MEMLIMIT_MIN
OPSLIMIT_MAX = nacl.bindings.crypto_pwhash_argon2i_OPSLIMIT_MAX
OPSLIMIT_MIN = nacl.bindings.crypto_pwhash_argon2i_OPSLIMIT_MIN

# Preset cost pairs for common use cases: interactive login,
# moderate, and sensitive long-term key protection.
OPSLIMIT_INTERACTIVE = nacl.bindings.crypto_pwhash_argon2i_OPSLIMIT_INTERACTIVE
MEMLIMIT_INTERACTIVE = nacl.bindings.crypto_pwhash_argon2i_MEMLIMIT_INTERACTIVE
OPSLIMIT_SENSITIVE = nacl.bindings.crypto_pwhash_argon2i_OPSLIMIT_SENSITIVE
MEMLIMIT_SENSITIVE = nacl.bindings.crypto_pwhash_argon2i_MEMLIMIT_SENSITIVE

OPSLIMIT_MODERATE = nacl.bindings.crypto_pwhash_argon2i_OPSLIMIT_MODERATE
MEMLIMIT_MODERATE = nacl.bindings.crypto_pwhash_argon2i_MEMLIMIT_MODERATE
+
+
def kdf(
    size: int,
    password: bytes,
    salt: bytes,
    opslimit: int = OPSLIMIT_SENSITIVE,
    memlimit: int = MEMLIMIT_SENSITIVE,
    encoder: nacl.encoding.Encoder = nacl.encoding.RawEncoder,
) -> bytes:
    """
    Derive a ``size`` bytes long key from ``password`` and ``salt``
    using the argon2i memory-hard construct.

    The module-level OPSLIMIT_* / MEMLIMIT_* constants (INTERACTIVE,
    MODERATE, SENSITIVE) provide guidance for correct cost settings.

    :param size: derived key size, must be between
        :py:const:`.BYTES_MIN` and :py:const:`.BYTES_MAX`
    :type size: int
    :param password: secret input seeding the derivation; its length
        must be between :py:const:`.PASSWD_MIN` and :py:const:`.PASSWD_MAX`
    :type password: bytes
    :param salt: **RANDOM** salt of exactly :py:const:`.SALTBYTES` bytes
    :type salt: bytes
    :param opslimit: operation-count component of the computational
        cost, between :py:const:`.OPSLIMIT_MIN` and :py:const:`.OPSLIMIT_MAX`
    :type opslimit: int
    :param memlimit: memory-occupation component of the computational
        cost, between :py:const:`.MEMLIMIT_MIN` and :py:const:`.MEMLIMIT_MAX`
    :type memlimit: int
    :rtype: bytes

    .. versionadded:: 1.2
    """
    # Derive the raw key bytes, then apply the caller's encoding.
    raw_key = nacl.bindings.crypto_pwhash_alg(
        size, password, salt, opslimit, memlimit, ALG
    )
    return encoder.encode(raw_key)
+
+
def str(
    password: bytes,
    opslimit: int = OPSLIMIT_INTERACTIVE,
    memlimit: int = MEMLIMIT_INTERACTIVE,
) -> bytes:
    """
    Hash ``password`` with a random salt using the memory-hard argon2i
    construct, returning an ascii modular-crypt string that embeds
    everything needed to verify a candidate password later.

    The default opslimit/memlimit settings are those deemed correct
    for the interactive user login case.

    :param bytes password:
    :param int opslimit:
    :param int memlimit:
    :rtype: bytes

    .. versionadded:: 1.2
    """
    hashed = nacl.bindings.crypto_pwhash_str_alg(
        password, opslimit, memlimit, ALG
    )
    return hashed
diff --git a/lib/nacl/pwhash/argon2id.py b/lib/nacl/pwhash/argon2id.py
new file mode 100644
index 0000000..f3aa3f7
--- /dev/null
+++ b/lib/nacl/pwhash/argon2id.py
@@ -0,0 +1,135 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import nacl.bindings
import nacl.encoding

from . import _argon2

# Algorithm identifier and modular-crypt string prefix for argon2id.
ALG = _argon2.ALG_ARGON2ID13
STRPREFIX = nacl.bindings.crypto_pwhash_argon2id_STRPREFIX

SALTBYTES = _argon2.SALTBYTES

PASSWD_MIN = _argon2.PASSWD_MIN
PASSWD_MAX = _argon2.PASSWD_MAX

PWHASH_SIZE = _argon2.PWHASH_SIZE

BYTES_MIN = _argon2.BYTES_MIN
BYTES_MAX = _argon2.BYTES_MAX

# argon2i and argon2id share a single modular-crypt verifier.
verify = _argon2.verify

MEMLIMIT_MIN = nacl.bindings.crypto_pwhash_argon2id_MEMLIMIT_MIN
MEMLIMIT_MAX = nacl.bindings.crypto_pwhash_argon2id_MEMLIMIT_MAX
OPSLIMIT_MIN = nacl.bindings.crypto_pwhash_argon2id_OPSLIMIT_MIN
OPSLIMIT_MAX = nacl.bindings.crypto_pwhash_argon2id_OPSLIMIT_MAX

# Preset cost pairs for common use cases: interactive login,
# moderate, and sensitive long-term key protection.
OPSLIMIT_INTERACTIVE = (
    nacl.bindings.crypto_pwhash_argon2id_OPSLIMIT_INTERACTIVE
)
MEMLIMIT_INTERACTIVE = (
    nacl.bindings.crypto_pwhash_argon2id_MEMLIMIT_INTERACTIVE
)
OPSLIMIT_SENSITIVE = nacl.bindings.crypto_pwhash_argon2id_OPSLIMIT_SENSITIVE
MEMLIMIT_SENSITIVE = nacl.bindings.crypto_pwhash_argon2id_MEMLIMIT_SENSITIVE

OPSLIMIT_MODERATE = nacl.bindings.crypto_pwhash_argon2id_OPSLIMIT_MODERATE
MEMLIMIT_MODERATE = nacl.bindings.crypto_pwhash_argon2id_MEMLIMIT_MODERATE
+
+
def kdf(
    size: int,
    password: bytes,
    salt: bytes,
    opslimit: int = OPSLIMIT_SENSITIVE,
    memlimit: int = MEMLIMIT_SENSITIVE,
    encoder: nacl.encoding.Encoder = nacl.encoding.RawEncoder,
) -> bytes:
    """
    Derive a ``size`` bytes long key from ``password`` and ``salt``
    using the argon2id memory-hard construct.

    The module-level OPSLIMIT_* / MEMLIMIT_* constants (INTERACTIVE,
    MODERATE, SENSITIVE) provide guidance for correct cost settings.

    :param size: derived key size, must be between
        :py:const:`.BYTES_MIN` and :py:const:`.BYTES_MAX`
    :type size: int
    :param password: secret input seeding the derivation; its length
        must be between :py:const:`.PASSWD_MIN` and :py:const:`.PASSWD_MAX`
    :type password: bytes
    :param salt: **RANDOM** salt of exactly :py:const:`.SALTBYTES` bytes
    :type salt: bytes
    :param opslimit: operation-count component of the computational
        cost, between :py:const:`.OPSLIMIT_MIN` and :py:const:`.OPSLIMIT_MAX`
    :type opslimit: int
    :param memlimit: memory-occupation component of the computational
        cost, between :py:const:`.MEMLIMIT_MIN` and :py:const:`.MEMLIMIT_MAX`
    :type memlimit: int
    :rtype: bytes

    .. versionadded:: 1.2
    """
    # Derive the raw key bytes, then apply the caller's encoding.
    raw_key = nacl.bindings.crypto_pwhash_alg(
        size, password, salt, opslimit, memlimit, ALG
    )
    return encoder.encode(raw_key)
+
+
def str(
    password: bytes,
    opslimit: int = OPSLIMIT_INTERACTIVE,
    memlimit: int = MEMLIMIT_INTERACTIVE,
) -> bytes:
    """
    Hash ``password`` with a random salt using the memory-hard argon2id
    construct, returning an ascii modular-crypt string that embeds
    everything needed to verify a candidate password later.

    The default opslimit/memlimit settings are those deemed correct
    for the interactive user login case.

    :param bytes password:
    :param int opslimit:
    :param int memlimit:
    :rtype: bytes

    .. versionadded:: 1.2
    """
    hashed = nacl.bindings.crypto_pwhash_str_alg(
        password, opslimit, memlimit, ALG
    )
    return hashed
diff --git a/lib/nacl/pwhash/scrypt.py b/lib/nacl/pwhash/scrypt.py
new file mode 100644
index 0000000..b9fc9d8
--- /dev/null
+++ b/lib/nacl/pwhash/scrypt.py
@@ -0,0 +1,211 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from typing import cast

import nacl.bindings
import nacl.encoding
from nacl import exceptions as exc
from nacl.exceptions import ensure

# libsodium's STRBYTES counts the trailing C NUL terminator; the actual
# modular-crypt string is one byte shorter (see PWHASH_SIZE below).
_strbytes_plus_one = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_STRBYTES

# scrypt may be compiled out of minimal libsodium builds; callers must
# check this flag (the functions below enforce it via ensure()).
AVAILABLE = nacl.bindings.has_crypto_pwhash_scryptsalsa208sha256

STRPREFIX = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_STRPREFIX

SALTBYTES = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_SALTBYTES

PASSWD_MIN = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_PASSWD_MIN
PASSWD_MAX = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_PASSWD_MAX

PWHASH_SIZE = _strbytes_plus_one - 1

BYTES_MIN = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_BYTES_MIN
BYTES_MAX = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_BYTES_MAX

MEMLIMIT_MIN = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MIN
MEMLIMIT_MAX = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_MAX
OPSLIMIT_MIN = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MIN
OPSLIMIT_MAX = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_MAX

# Preset cost pairs for interactive login and sensitive long-term keys.
OPSLIMIT_INTERACTIVE = (
    nacl.bindings.crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_INTERACTIVE
)
MEMLIMIT_INTERACTIVE = (
    nacl.bindings.crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_INTERACTIVE
)
OPSLIMIT_SENSITIVE = (
    nacl.bindings.crypto_pwhash_scryptsalsa208sha256_OPSLIMIT_SENSITIVE
)
MEMLIMIT_SENSITIVE = (
    nacl.bindings.crypto_pwhash_scryptsalsa208sha256_MEMLIMIT_SENSITIVE
)

# No MODERATE presets exist in libsodium for scrypt; derive them as 8x
# the interactive limits.
OPSLIMIT_MODERATE = 8 * OPSLIMIT_INTERACTIVE
MEMLIMIT_MODERATE = 8 * MEMLIMIT_INTERACTIVE
+
+
def kdf(
    size: int,
    password: bytes,
    salt: bytes,
    opslimit: int = OPSLIMIT_SENSITIVE,
    memlimit: int = MEMLIMIT_SENSITIVE,
    encoder: nacl.encoding.Encoder = nacl.encoding.RawEncoder,
) -> bytes:
    """
    Derive a ``size`` bytes long key from ``password`` and ``salt``
    using the scryptsalsa208sha256 memory-hard construct.

    The module-level OPSLIMIT_* / MEMLIMIT_* constants (INTERACTIVE,
    SENSITIVE, MODERATE) provide guidance for correct settings for the
    interactive-login and long-term-key use cases respectively.

    :param size: derived key size, must be between
        :py:const:`.BYTES_MIN` and :py:const:`.BYTES_MAX`
    :type size: int
    :param password: secret input seeding the derivation; its length
        must be between :py:const:`.PASSWD_MIN` and :py:const:`.PASSWD_MAX`
    :type password: bytes
    :param salt: **RANDOM** salt of exactly :py:const:`.SALTBYTES` bytes
    :type salt: bytes
    :param opslimit: operation-count component of the computational
        cost, between :py:const:`.OPSLIMIT_MIN` and :py:const:`.OPSLIMIT_MAX`
    :type opslimit: int
    :param memlimit: memory-occupation component of the computational
        cost, between :py:const:`.MEMLIMIT_MIN` and :py:const:`.MEMLIMIT_MAX`
    :type memlimit: int
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.

    .. versionadded:: 1.2
    """
    ensure(
        AVAILABLE,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    ensure(
        len(salt) == SALTBYTES,
        "The salt must be exactly %s, not %s bytes long"
        % (SALTBYTES, len(salt)),
        raising=exc.ValueError,
    )

    # Translate the opslimit/memlimit cost pair into scrypt's native
    # (N, r, p) parameters.
    log2_n, block_size, parallelism = (
        nacl.bindings.nacl_bindings_pick_scrypt_params(opslimit, memlimit)
    )
    # maxmem is memlimit plus 2**16 bytes of headroom.
    max_memory = memlimit + (2**16)

    # Cast safety: log2_n is a positive integer, so 2 ** log2_n is also a
    # positive integer. Mypy+typeshed can't deduce this, because there's
    # no way for them to know that log2_n: int is positive.
    cost = cast(int, 2**log2_n)

    derived = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_ll(
        password,
        salt,
        cost,
        block_size,
        parallelism,
        maxmem=max_memory,
        dklen=size,
    )
    return encoder.encode(derived)
+
+
def str(
    password: bytes,
    opslimit: int = OPSLIMIT_INTERACTIVE,
    memlimit: int = MEMLIMIT_INTERACTIVE,
) -> bytes:
    """
    Hash ``password`` with a random salt using the memory-hard
    scryptsalsa208sha256 construct, returning an ascii modular-crypt
    string that embeds everything needed to verify a candidate
    password later.

    The default opslimit/memlimit settings are those deemed correct
    for the interactive user login case.

    :param bytes password:
    :param int opslimit:
    :param int memlimit:
    :rtype: bytes
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.

    .. versionadded:: 1.2
    """
    ensure(
        AVAILABLE,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    hashed = nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str(
        password, opslimit, memlimit
    )
    return hashed
+
+
def verify(password_hash: bytes, password: bytes) -> bool:
    """
    Takes the output of scryptsalsa208sha256 and compares it against
    a user provided password to see if they are the same

    :param password_hash: bytes
    :param password: bytes
    :rtype: boolean
    :raises nacl.exceptions.UnavailableError: If called when using a
        minimal build of libsodium.

    .. versionadded:: 1.2
    """
    ensure(
        AVAILABLE,
        "Not available in minimal build",
        raising=exc.UnavailableError,
    )

    # Fix: the length check requires PWHASH_SIZE (= STRBYTES - 1, i.e.
    # excluding the C NUL terminator), but the error message quoted
    # STRBYTES — one byte more than the actual requirement. Quote the
    # same constant the check uses so the message matches.
    ensure(
        len(password_hash) == PWHASH_SIZE,
        "The password hash must be exactly %s bytes long" % PWHASH_SIZE,
        raising=exc.ValueError,
    )

    return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str_verify(
        password_hash, password
    )
diff --git a/lib/nacl/py.typed b/lib/nacl/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/lib/nacl/secret.py b/lib/nacl/secret.py
new file mode 100644
index 0000000..5c3064f
--- /dev/null
+++ b/lib/nacl/secret.py
@@ -0,0 +1,305 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import ClassVar, Optional
+
+import nacl.bindings
+from nacl import encoding
+from nacl import exceptions as exc
+from nacl.utils import EncryptedMessage, StringFixer, random
+
+
class SecretBox(encoding.Encodable, StringFixer):
    """
    The SecretBox class encrypts and decrypts messages using the given secret
    key.

    The ciphertexts generated by :class:`~nacl.secret.SecretBox` include a 16
    byte authenticator which is checked as part of the decryption. An invalid
    authenticator will cause the decrypt function to raise an exception. The
    authenticator is not a signature. Once you've decrypted the message you've
    demonstrated the ability to create arbitrary valid messages, so messages
    you send are repudiable. For non-repudiable messages, sign them after
    encryption.

    Encryption is done using `XSalsa20-Poly1305`_, and there are no practical
    limits on the number or size of messages (up to 2⁶⁴ messages, each up to 2⁶⁴
    bytes).

    .. _XSalsa20-Poly1305: https://doc.libsodium.org/secret-key_cryptography/secretbox#algorithm-details

    :param key: The secret key used to encrypt and decrypt messages
    :param encoder: The encoder class used to decode the given key

    :cvar KEY_SIZE: The size that the key is required to be.
    :cvar NONCE_SIZE: The size that the nonce is required to be.
    :cvar MACBYTES: The size of the authentication MAC tag in bytes.
    :cvar MESSAGEBYTES_MAX: The maximum size of a message which can be
                            safely encrypted with a single key/nonce
                            pair.
    """

    KEY_SIZE: ClassVar[int] = nacl.bindings.crypto_secretbox_KEYBYTES
    NONCE_SIZE: ClassVar[int] = nacl.bindings.crypto_secretbox_NONCEBYTES
    MACBYTES: ClassVar[int] = nacl.bindings.crypto_secretbox_MACBYTES
    MESSAGEBYTES_MAX: ClassVar[int] = (
        nacl.bindings.crypto_secretbox_MESSAGEBYTES_MAX
    )

    def __init__(
        self, key: bytes, encoder: encoding.Encoder = encoding.RawEncoder
    ):
        # Decode the key from its transport encoding before validating it.
        key = encoder.decode(key)
        if not isinstance(key, bytes):
            raise exc.TypeError("SecretBox must be created from 32 bytes")

        if len(key) != self.KEY_SIZE:
            raise exc.ValueError(
                "The key must be exactly %s bytes long" % self.KEY_SIZE,
            )

        self._key = key

    def __bytes__(self) -> bytes:
        # Expose the raw secret key so bytes(box) round-trips it.
        return self._key

    def encrypt(
        self,
        plaintext: bytes,
        nonce: Optional[bytes] = None,
        encoder: encoding.Encoder = encoding.RawEncoder,
    ) -> EncryptedMessage:
        """
        Encrypts the plaintext message using the given `nonce` (or generates
        one randomly if omitted) and returns the ciphertext encoded with the
        encoder.

        .. warning:: It is **VITALLY** important that the nonce is a nonce,
            i.e. it is a number used only once for any given key. If you fail
            to do this, you compromise the privacy of the messages encrypted.
            Give your nonces a different prefix, or have one side use an odd
            counter and one an even counter. Just make sure they are different.

        :param plaintext: [:class:`bytes`] The plaintext message to encrypt
        :param nonce: [:class:`bytes`] The nonce to use in the encryption
        :param encoder: The encoder to use to encode the ciphertext
        :rtype: [:class:`nacl.utils.EncryptedMessage`]
        """
        if nonce is None:
            nonce = random(self.NONCE_SIZE)

        if len(nonce) != self.NONCE_SIZE:
            raise exc.ValueError(
                "The nonce must be exactly %s bytes long" % self.NONCE_SIZE,
            )

        ciphertext = nacl.bindings.crypto_secretbox_easy(
            plaintext, nonce, self._key
        )

        encoded_nonce = encoder.encode(nonce)
        encoded_ciphertext = encoder.encode(ciphertext)

        # The returned EncryptedMessage carries the nonce and ciphertext
        # both separately and as a single combined blob.
        return EncryptedMessage._from_parts(
            encoded_nonce,
            encoded_ciphertext,
            encoder.encode(nonce + ciphertext),
        )

    def decrypt(
        self,
        ciphertext: bytes,
        nonce: Optional[bytes] = None,
        encoder: encoding.Encoder = encoding.RawEncoder,
    ) -> bytes:
        """
        Decrypts the ciphertext using the `nonce` (explicitly, when passed as a
        parameter or implicitly, when omitted, as part of the ciphertext) and
        returns the plaintext message.

        :param ciphertext: [:class:`bytes`] The encrypted message to decrypt
        :param nonce: [:class:`bytes`] The nonce used when encrypting the
            ciphertext
        :param encoder: The encoder used to decode the ciphertext.
        :rtype: [:class:`bytes`]
        """
        # Decode our ciphertext
        ciphertext = encoder.decode(ciphertext)

        if nonce is None:
            # If we were given the nonce and ciphertext combined, split them.
            nonce = ciphertext[: self.NONCE_SIZE]
            ciphertext = ciphertext[self.NONCE_SIZE :]

        if len(nonce) != self.NONCE_SIZE:
            raise exc.ValueError(
                "The nonce must be exactly %s bytes long" % self.NONCE_SIZE,
            )

        plaintext = nacl.bindings.crypto_secretbox_open_easy(
            ciphertext, nonce, self._key
        )

        return plaintext
+
+
class Aead(encoding.Encodable, StringFixer):
    """
    The AEAD class encrypts and decrypts messages using the given secret key.

    Unlike :class:`~nacl.secret.SecretBox`, AEAD supports authenticating
    non-confidential data received alongside the message, such as a length
    or type tag.

    Like :class:`~nacl.secret.SecretBox`, this class provides authenticated
    encryption. An inauthentic message will cause the decrypt function to raise
    an exception.

    Likewise, the authenticator should not be mistaken for a (public-key)
    signature: recipients (with the ability to decrypt messages) are capable of
    creating arbitrary valid messages; in particular, this means AEAD messages
    are repudiable. For non-repudiable messages, sign them after encryption.

    The cryptosystem used is `XChacha20-Poly1305`_ as specified for
    `standardization`_. There are `no practical limits`_ to how much can safely
    be encrypted under a given key (up to 2⁶⁴ messages each containing up
    to 2⁶⁴ bytes).

    .. _standardization: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha
    .. _XChacha20-Poly1305: https://doc.libsodium.org/secret-key_cryptography/aead#xchacha-20-poly1305
    .. _no practical limits: https://doc.libsodium.org/secret-key_cryptography/aead#limitations

    :param key: The secret key used to encrypt and decrypt messages
    :param encoder: The encoder class used to decode the given key

    :cvar KEY_SIZE: The size that the key is required to be.
    :cvar NONCE_SIZE: The size that the nonce is required to be.
    :cvar MACBYTES: The size of the authentication MAC tag in bytes.
    :cvar MESSAGEBYTES_MAX: The maximum size of a message which can be
                            safely encrypted with a single key/nonce
                            pair.
    """

    KEY_SIZE: ClassVar[int] = (
        nacl.bindings.crypto_aead_xchacha20poly1305_ietf_KEYBYTES
    )
    NONCE_SIZE: ClassVar[int] = (
        nacl.bindings.crypto_aead_xchacha20poly1305_ietf_NPUBBYTES
    )
    MACBYTES: ClassVar[int] = (
        nacl.bindings.crypto_aead_xchacha20poly1305_ietf_ABYTES
    )
    MESSAGEBYTES_MAX: ClassVar[int] = (
        nacl.bindings.crypto_aead_xchacha20poly1305_ietf_MESSAGEBYTES_MAX
    )

    def __init__(
        self,
        key: bytes,
        encoder: encoding.Encoder = encoding.RawEncoder,
    ):
        # Decode the key from its transport encoding before validating it.
        key = encoder.decode(key)
        if not isinstance(key, bytes):
            raise exc.TypeError("AEAD must be created from 32 bytes")

        if len(key) != self.KEY_SIZE:
            raise exc.ValueError(
                "The key must be exactly %s bytes long" % self.KEY_SIZE,
            )

        self._key = key

    def __bytes__(self) -> bytes:
        # Expose the raw secret key so bytes(aead) round-trips it.
        return self._key

    def encrypt(
        self,
        plaintext: bytes,
        aad: bytes = b"",
        nonce: Optional[bytes] = None,
        encoder: encoding.Encoder = encoding.RawEncoder,
    ) -> EncryptedMessage:
        """
        Encrypts the plaintext message using the given `nonce` (or generates
        one randomly if omitted) and returns the ciphertext encoded with the
        encoder.

        .. warning:: It is vitally important for ``nonce`` to be unique.
            By default, it is generated randomly; [:class:`Aead`] uses XChacha20
            for extended (192b) nonce size, so the risk of reusing random nonces
            is negligible. It is *strongly recommended* to keep this behaviour,
            as nonce reuse will compromise the privacy of encrypted messages.
            Should implicit nonces be inadequate for your application, the
            second best option is using split counters; e.g. if sending messages
            encrypted under a shared key between 2 users, each user can use the
            number of messages it sent so far, prefixed or suffixed with a 1bit
            user id. Note that the counter must **never** be rolled back (due
            to overflow, on-disk state being rolled back to an earlier backup,
            ...)

        :param plaintext: [:class:`bytes`] The plaintext message to encrypt
        :param aad: [:class:`bytes`] Additional non-confidential data that is
            authenticated alongside the message (must be supplied again, byte
            for byte, at decryption time)
        :param nonce: [:class:`bytes`] The nonce to use in the encryption
        :param encoder: The encoder to use to encode the ciphertext
        :rtype: [:class:`nacl.utils.EncryptedMessage`]
        """
        if nonce is None:
            nonce = random(self.NONCE_SIZE)

        if len(nonce) != self.NONCE_SIZE:
            raise exc.ValueError(
                "The nonce must be exactly %s bytes long" % self.NONCE_SIZE,
            )

        ciphertext = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_encrypt(
            plaintext, aad, nonce, self._key
        )

        encoded_nonce = encoder.encode(nonce)
        encoded_ciphertext = encoder.encode(ciphertext)

        # The returned EncryptedMessage carries the nonce and ciphertext
        # both separately and as a single combined blob.
        return EncryptedMessage._from_parts(
            encoded_nonce,
            encoded_ciphertext,
            encoder.encode(nonce + ciphertext),
        )

    def decrypt(
        self,
        ciphertext: bytes,
        aad: bytes = b"",
        nonce: Optional[bytes] = None,
        encoder: encoding.Encoder = encoding.RawEncoder,
    ) -> bytes:
        """
        Decrypts the ciphertext using the `nonce` (explicitly, when passed as a
        parameter or implicitly, when omitted, as part of the ciphertext) and
        returns the plaintext message.

        :param ciphertext: [:class:`bytes`] The encrypted message to decrypt
        :param aad: [:class:`bytes`] The additional authenticated data that
            was supplied at encryption time; decryption fails if it differs
        :param nonce: [:class:`bytes`] The nonce used when encrypting the
            ciphertext
        :param encoder: The encoder used to decode the ciphertext.
        :rtype: [:class:`bytes`]
        """
        # Decode our ciphertext
        ciphertext = encoder.decode(ciphertext)

        if nonce is None:
            # If we were given the nonce and ciphertext combined, split them.
            nonce = ciphertext[: self.NONCE_SIZE]
            ciphertext = ciphertext[self.NONCE_SIZE :]

        if len(nonce) != self.NONCE_SIZE:
            raise exc.ValueError(
                "The nonce must be exactly %s bytes long" % self.NONCE_SIZE,
            )

        plaintext = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_decrypt(
            ciphertext, aad, nonce, self._key
        )

        return plaintext
diff --git a/lib/nacl/signing.py b/lib/nacl/signing.py
new file mode 100644
index 0000000..536b369
--- /dev/null
+++ b/lib/nacl/signing.py
@@ -0,0 +1,250 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional
+
+import nacl.bindings
+from nacl import encoding
+from nacl import exceptions as exc
+from nacl.public import (
+ PrivateKey as _Curve25519_PrivateKey,
+ PublicKey as _Curve25519_PublicKey,
+)
+from nacl.utils import StringFixer, random
+
+
+class SignedMessage(bytes):
+ """
+ A bytes subclass that holds a message that has been signed by a
+ :class:`SigningKey`.
+ """
+
+ _signature: bytes
+ _message: bytes
+
+ @classmethod
+ def _from_parts(
+ cls, signature: bytes, message: bytes, combined: bytes
+ ) -> "SignedMessage":
+ obj = cls(combined)
+ obj._signature = signature
+ obj._message = message
+ return obj
+
+ @property
+ def signature(self) -> bytes:
+ """
+ The signature contained within the :class:`SignedMessage`.
+ """
+ return self._signature
+
+ @property
+ def message(self) -> bytes:
+ """
+ The message contained within the :class:`SignedMessage`.
+ """
+ return self._message
+
+
+class VerifyKey(encoding.Encodable, StringFixer):
+ """
+ The public key counterpart to an Ed25519 SigningKey for producing digital
+ signatures.
+
+ :param key: [:class:`bytes`] Serialized Ed25519 public key
+ :param encoder: A class that is able to decode the `key`
+ """
+
+ def __init__(
+ self, key: bytes, encoder: encoding.Encoder = encoding.RawEncoder
+ ):
+ # Decode the key
+ key = encoder.decode(key)
+ if not isinstance(key, bytes):
+ raise exc.TypeError("VerifyKey must be created from 32 bytes")
+
+ if len(key) != nacl.bindings.crypto_sign_PUBLICKEYBYTES:
+ raise exc.ValueError(
+ "The key must be exactly %s bytes long"
+ % nacl.bindings.crypto_sign_PUBLICKEYBYTES,
+ )
+
+ self._key = key
+
+ def __bytes__(self) -> bytes:
+ return self._key
+
+ def __hash__(self) -> int:
+ return hash(bytes(self))
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, self.__class__):
+ return False
+ return nacl.bindings.sodium_memcmp(bytes(self), bytes(other))
+
+ def __ne__(self, other: object) -> bool:
+ return not (self == other)
+
+ def verify(
+ self,
+ smessage: bytes,
+ signature: Optional[bytes] = None,
+ encoder: encoding.Encoder = encoding.RawEncoder,
+ ) -> bytes:
+ """
+ Verifies the signature of a signed message, returning the message
+ if it has not been tampered with else raising
+ :class:`~nacl.exceptions.BadSignatureError`.
+
+ :param smessage: [:class:`bytes`] Either the original message or a
+ signature and message concatenated together.
+ :param signature: [:class:`bytes`] If an unsigned message is given for
+ smessage then the detached signature must be provided.
+ :param encoder: A class that is able to decode the secret message and
+ signature.
+ :rtype: :class:`bytes`
+ """
+ if signature is not None:
+ # If we were given the message and signature separately, validate
+ # signature size and combine them.
+ if not isinstance(signature, bytes):
+ raise exc.TypeError(
+ "Verification signature must be created from %d bytes"
+ % nacl.bindings.crypto_sign_BYTES,
+ )
+
+ if len(signature) != nacl.bindings.crypto_sign_BYTES:
+ raise exc.ValueError(
+ "The signature must be exactly %d bytes long"
+ % nacl.bindings.crypto_sign_BYTES,
+ )
+
+ smessage = signature + encoder.decode(smessage)
+ else:
+ # Decode the signed message
+ smessage = encoder.decode(smessage)
+
+ return nacl.bindings.crypto_sign_open(smessage, self._key)
+
+ def to_curve25519_public_key(self) -> _Curve25519_PublicKey:
+ """
+ Converts a :class:`~nacl.signing.VerifyKey` to a
+ :class:`~nacl.public.PublicKey`
+
+ :rtype: :class:`~nacl.public.PublicKey`
+ """
+ raw_pk = nacl.bindings.crypto_sign_ed25519_pk_to_curve25519(self._key)
+ return _Curve25519_PublicKey(raw_pk)
+
+
+class SigningKey(encoding.Encodable, StringFixer):
+ """
+ Private key for producing digital signatures using the Ed25519 algorithm.
+
+ Signing keys are produced from a 32-byte (256-bit) random seed value. This
+ value can be passed into the :class:`~nacl.signing.SigningKey` as a
+ :func:`bytes` whose length is 32.
+
+ .. warning:: This **must** be protected and remain secret. Anyone who knows
+ the value of your :class:`~nacl.signing.SigningKey` or its seed can
+ masquerade as you.
+
+ :param seed: [:class:`bytes`] Random 32-byte value (i.e. private key)
+ :param encoder: A class that is able to decode the seed
+
+ :ivar: verify_key: [:class:`~nacl.signing.VerifyKey`] The verify
+ (i.e. public) key that corresponds with this signing key.
+ """
+
+ def __init__(
+ self,
+ seed: bytes,
+ encoder: encoding.Encoder = encoding.RawEncoder,
+ ):
+ # Decode the seed
+ seed = encoder.decode(seed)
+ if not isinstance(seed, bytes):
+ raise exc.TypeError(
+ "SigningKey must be created from a 32 byte seed"
+ )
+
+ # Verify that our seed is the proper size
+ if len(seed) != nacl.bindings.crypto_sign_SEEDBYTES:
+ raise exc.ValueError(
+ "The seed must be exactly %d bytes long"
+ % nacl.bindings.crypto_sign_SEEDBYTES
+ )
+
+ public_key, secret_key = nacl.bindings.crypto_sign_seed_keypair(seed)
+
+ self._seed = seed
+ self._signing_key = secret_key
+ self.verify_key = VerifyKey(public_key)
+
+ def __bytes__(self) -> bytes:
+ return self._seed
+
+ def __hash__(self) -> int:
+ return hash(bytes(self))
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, self.__class__):
+ return False
+ return nacl.bindings.sodium_memcmp(bytes(self), bytes(other))
+
+ def __ne__(self, other: object) -> bool:
+ return not (self == other)
+
+ @classmethod
+ def generate(cls) -> "SigningKey":
+ """
+ Generates a random :class:`~nacl.signing.SigningKey` object.
+
+ :rtype: :class:`~nacl.signing.SigningKey`
+ """
+ return cls(
+ random(nacl.bindings.crypto_sign_SEEDBYTES),
+ encoder=encoding.RawEncoder,
+ )
+
+ def sign(
+ self,
+ message: bytes,
+ encoder: encoding.Encoder = encoding.RawEncoder,
+ ) -> SignedMessage:
+ """
+ Sign a message using this key.
+
+ :param message: [:class:`bytes`] The data to be signed.
+ :param encoder: A class that is used to encode the signed message.
+ :rtype: :class:`~nacl.signing.SignedMessage`
+ """
+ raw_signed = nacl.bindings.crypto_sign(message, self._signing_key)
+
+ crypto_sign_BYTES = nacl.bindings.crypto_sign_BYTES
+ signature = encoder.encode(raw_signed[:crypto_sign_BYTES])
+ message = encoder.encode(raw_signed[crypto_sign_BYTES:])
+ signed = encoder.encode(raw_signed)
+
+ return SignedMessage._from_parts(signature, message, signed)
+
+ def to_curve25519_private_key(self) -> _Curve25519_PrivateKey:
+ """
+ Converts a :class:`~nacl.signing.SigningKey` to a
+ :class:`~nacl.public.PrivateKey`
+
+ :rtype: :class:`~nacl.public.PrivateKey`
+ """
+ sk = self._signing_key
+ raw_private = nacl.bindings.crypto_sign_ed25519_sk_to_curve25519(sk)
+ return _Curve25519_PrivateKey(raw_private)
diff --git a/lib/nacl/utils.py b/lib/nacl/utils.py
new file mode 100644
index 0000000..d19d236
--- /dev/null
+++ b/lib/nacl/utils.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Donald Stufft and individual contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+from typing import SupportsBytes, Type, TypeVar
+
+import nacl.bindings
+from nacl import encoding
+
+_EncryptedMessage = TypeVar("_EncryptedMessage", bound="EncryptedMessage")
+
+
+class EncryptedMessage(bytes):
+ """
+ A bytes subclass that holds a message that has been encrypted by a
+ :class:`SecretBox`.
+ """
+
+ _nonce: bytes
+ _ciphertext: bytes
+
+ @classmethod
+ def _from_parts(
+ cls: Type[_EncryptedMessage],
+ nonce: bytes,
+ ciphertext: bytes,
+ combined: bytes,
+ ) -> _EncryptedMessage:
+ obj = cls(combined)
+ obj._nonce = nonce
+ obj._ciphertext = ciphertext
+ return obj
+
+ @property
+ def nonce(self) -> bytes:
+ """
+ The nonce used during the encryption of the :class:`EncryptedMessage`.
+ """
+ return self._nonce
+
+ @property
+ def ciphertext(self) -> bytes:
+ """
+ The ciphertext contained within the :class:`EncryptedMessage`.
+ """
+ return self._ciphertext
+
+
+class StringFixer:
+ def __str__(self: SupportsBytes) -> str:
+ return str(self.__bytes__())
+
+
+def bytes_as_string(bytes_in: bytes) -> str:
+ return bytes_in.decode("ascii")
+
+
+def random(size: int = 32) -> bytes:
+ return os.urandom(size)
+
+
+def randombytes_deterministic(
+ size: int, seed: bytes, encoder: encoding.Encoder = encoding.RawEncoder
+) -> bytes:
+ """
+ Returns ``size`` number of deterministically generated pseudorandom bytes
+ from a seed
+
+ :param size: int
+ :param seed: bytes
+ :param encoder: The encoder class used to encode the produced bytes
+ :rtype: bytes
+ """
+ raw_data = nacl.bindings.randombytes_buf_deterministic(size, seed)
+
+ return encoder.encode(raw_data)
diff --git a/lib/paramiko-4.0.0.dist-info/INSTALLER b/lib/paramiko-4.0.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/lib/paramiko-4.0.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/paramiko-4.0.0.dist-info/METADATA b/lib/paramiko-4.0.0.dist-info/METADATA
new file mode 100644
index 0000000..d57a39b
--- /dev/null
+++ b/lib/paramiko-4.0.0.dist-info/METADATA
@@ -0,0 +1,88 @@
+Metadata-Version: 2.4
+Name: paramiko
+Version: 4.0.0
+Summary: SSH2 protocol library
+Author-email: Jeff Forcier
+License-Expression: LGPL-2.1
+Project-URL: Docs, https://docs.paramiko.org
+Project-URL: Source, https://github.com/paramiko/paramiko
+Project-URL: Changelog, https://www.paramiko.org/changelog.html
+Project-URL: CI, https://app.circleci.com/pipelines/github/paramiko/paramiko
+Project-URL: Issues, https://github.com/paramiko/paramiko/issues
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Internet
+Classifier: Topic :: Security :: Cryptography
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: bcrypt>=3.2
+Requires-Dist: cryptography>=3.3
+Requires-Dist: invoke>=2.0
+Requires-Dist: pynacl>=1.5
+Provides-Extra: gssapi
+Requires-Dist: pyasn1>=0.1.7; extra == "gssapi"
+Requires-Dist: gssapi>=1.4.1; platform_system != "Windows" and extra == "gssapi"
+Requires-Dist: pywin32>=2.1.8; platform_system == "Windows" and extra == "gssapi"
+Dynamic: license-file
+
+|version| |python| |license| |ci| |coverage|
+
+.. |version| image:: https://img.shields.io/pypi/v/paramiko
+ :target: https://pypi.org/project/paramiko/
+ :alt: PyPI - Package Version
+.. |python| image:: https://img.shields.io/pypi/pyversions/paramiko
+ :target: https://pypi.org/project/paramiko/
+ :alt: PyPI - Python Version
+.. |license| image:: https://img.shields.io/pypi/l/paramiko
+ :target: https://github.com/paramiko/paramiko/blob/main/LICENSE
+ :alt: PyPI - License
+.. |ci| image:: https://img.shields.io/circleci/build/github/paramiko/paramiko/main
+ :target: https://app.circleci.com/pipelines/github/paramiko/paramiko
+ :alt: CircleCI
+.. |coverage| image:: https://img.shields.io/codecov/c/gh/paramiko/paramiko
+ :target: https://app.codecov.io/gh/paramiko/paramiko
+ :alt: Codecov
+
+Welcome to Paramiko!
+====================
+
+Paramiko is a pure-Python [#]_ implementation of the SSHv2 protocol [#]_,
+providing both client and server functionality. It provides the foundation for
+the high-level SSH library `Fabric `_, which is what we
+recommend you use for common client use-cases such as running remote shell
+commands or transferring files.
+
+Direct use of Paramiko itself is only intended for users who need
+advanced/low-level primitives or want to run an in-Python sshd.
+
+For installation information, changelogs, FAQs and similar, please visit `our
+main project website `_; for API details, see `the
+versioned docs `_. Additionally, the project
+maintainer keeps a `roadmap `_ on his
+personal site.
+
+.. [#]
+ Paramiko relies on `cryptography `_ for crypto
+ functionality, which makes use of C and Rust extensions but has many
+ precompiled options available. See `our installation page
+ `_ for details.
+
+.. [#]
+ OpenSSH's RFC specification page is a fantastic resource and collection of
+ links that we won't bother replicating here:
+ https://www.openssh.com/specs.html
+
+ OpenSSH itself also happens to be our primary reference implementation:
+ when in doubt, we consult how they do things, unless there are good reasons
+ not to. There are always some gaps, but we do our best to reconcile them
+ when possible.
diff --git a/lib/paramiko-4.0.0.dist-info/RECORD b/lib/paramiko-4.0.0.dist-info/RECORD
new file mode 100644
index 0000000..2389dec
--- /dev/null
+++ b/lib/paramiko-4.0.0.dist-info/RECORD
@@ -0,0 +1,95 @@
+paramiko-4.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+paramiko-4.0.0.dist-info/METADATA,sha256=9W89LHpZs7eu34MZOmkAWnscpX_N-pwZN5RNRAWQQTI,3900
+paramiko-4.0.0.dist-info/RECORD,,
+paramiko-4.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+paramiko-4.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+paramiko-4.0.0.dist-info/licenses/LICENSE,sha256=X6Jb9fOV_SbnAcLh3kyn0WKBaYbceRwi-PQiaFetG7I,26436
+paramiko-4.0.0.dist-info/top_level.txt,sha256=R9n-eCc_1kx1DnijF7Glmm-H67k9jUz5rm2YoPL8n54,9
+paramiko/__init__.py,sha256=aU-VhYiW5aIJosmuPeSteR1h5GeLOXNqdcpkaicsCwg,3523
+paramiko/__pycache__/__init__.cpython-314.pyc,,
+paramiko/__pycache__/_winapi.cpython-314.pyc,,
+paramiko/__pycache__/agent.cpython-314.pyc,,
+paramiko/__pycache__/auth_handler.cpython-314.pyc,,
+paramiko/__pycache__/auth_strategy.cpython-314.pyc,,
+paramiko/__pycache__/ber.cpython-314.pyc,,
+paramiko/__pycache__/buffered_pipe.cpython-314.pyc,,
+paramiko/__pycache__/channel.cpython-314.pyc,,
+paramiko/__pycache__/client.cpython-314.pyc,,
+paramiko/__pycache__/common.cpython-314.pyc,,
+paramiko/__pycache__/compress.cpython-314.pyc,,
+paramiko/__pycache__/config.cpython-314.pyc,,
+paramiko/__pycache__/ecdsakey.cpython-314.pyc,,
+paramiko/__pycache__/ed25519key.cpython-314.pyc,,
+paramiko/__pycache__/file.cpython-314.pyc,,
+paramiko/__pycache__/hostkeys.cpython-314.pyc,,
+paramiko/__pycache__/kex_curve25519.cpython-314.pyc,,
+paramiko/__pycache__/kex_ecdh_nist.cpython-314.pyc,,
+paramiko/__pycache__/kex_gex.cpython-314.pyc,,
+paramiko/__pycache__/kex_group1.cpython-314.pyc,,
+paramiko/__pycache__/kex_group14.cpython-314.pyc,,
+paramiko/__pycache__/kex_group16.cpython-314.pyc,,
+paramiko/__pycache__/kex_gss.cpython-314.pyc,,
+paramiko/__pycache__/message.cpython-314.pyc,,
+paramiko/__pycache__/packet.cpython-314.pyc,,
+paramiko/__pycache__/pipe.cpython-314.pyc,,
+paramiko/__pycache__/pkey.cpython-314.pyc,,
+paramiko/__pycache__/primes.cpython-314.pyc,,
+paramiko/__pycache__/proxy.cpython-314.pyc,,
+paramiko/__pycache__/rsakey.cpython-314.pyc,,
+paramiko/__pycache__/server.cpython-314.pyc,,
+paramiko/__pycache__/sftp.cpython-314.pyc,,
+paramiko/__pycache__/sftp_attr.cpython-314.pyc,,
+paramiko/__pycache__/sftp_client.cpython-314.pyc,,
+paramiko/__pycache__/sftp_file.cpython-314.pyc,,
+paramiko/__pycache__/sftp_handle.cpython-314.pyc,,
+paramiko/__pycache__/sftp_server.cpython-314.pyc,,
+paramiko/__pycache__/sftp_si.cpython-314.pyc,,
+paramiko/__pycache__/ssh_exception.cpython-314.pyc,,
+paramiko/__pycache__/ssh_gss.cpython-314.pyc,,
+paramiko/__pycache__/transport.cpython-314.pyc,,
+paramiko/__pycache__/util.cpython-314.pyc,,
+paramiko/__pycache__/win_openssh.cpython-314.pyc,,
+paramiko/__pycache__/win_pageant.cpython-314.pyc,,
+paramiko/_winapi.py,sha256=e4PyDmHmyLcAkZo4WAX7ah_I6fq4ex7A8FhxOPYAoA8,11204
+paramiko/agent.py,sha256=4vP4knAAzZiSblzSM_srbTYK2hVnUUT561vTBdCe2i4,15877
+paramiko/auth_handler.py,sha256=kMY00x5sUkrcR9uRHIIakQw4E6649oW1tMtIQPrFMFo,43006
+paramiko/auth_strategy.py,sha256=Pjcp8q64gUwk4CneGOnOhW0WBeKBRFURieWqC9AN0Ec,11437
+paramiko/ber.py,sha256=uFb-YokU4Rg2fKjyX8VMAu05STVk37YRgghlNHmdoYo,4369
+paramiko/buffered_pipe.py,sha256=AlkTLHYWbj4W-ZD7ORQZFjEFv7kC7QSvEYypfiHpwxw,7225
+paramiko/channel.py,sha256=MXO-C5dipy8Q0Shh9ceR-CPPiBB-ssT_9oIgwzBhQ_o,49222
+paramiko/client.py,sha256=d1UAVgVf_eWf-VqpwsjhyMFo4IEZcX2-rzZtkomsffY,34337
+paramiko/common.py,sha256=sBJW8KJz_EE8TsT7wLWTPuUiL2nNsLa_cfrTCe9Fyio,7756
+paramiko/compress.py,sha256=RCHTino0cHz1dy1pLbOhFhdWfGl4u50VmBcbT7qBWNc,1282
+paramiko/config.py,sha256=QPzwsk4Vem-Ecg2NhjRu78O9SU5ZO6DmfxZTA6cHWco,27362
+paramiko/ecdsakey.py,sha256=nK8oxORGgLP-zoC2REG46bAchVrlr35jfuxTn_Ac8sM,11653
+paramiko/ed25519key.py,sha256=FYurG0gqxmhNKh_22Hp3XEON5zuvzv-r5w8y9yJQgqY,7457
+paramiko/file.py,sha256=NgbhUjYgrLh-HQtsdYlPZ3CyvS0jhXqePk45GhHPMSo,19063
+paramiko/hostkeys.py,sha256=Ez2gaZF5ntj-vTvMbVXZoLRpU6tBnhSbXJm5FUlvzhw,13144
+paramiko/kex_curve25519.py,sha256=voEFDs_zkgEdWOqDakU-5DLYO3qotWcXYiqOCUP4GDo,4436
+paramiko/kex_ecdh_nist.py,sha256=RbHPwv8Gu5iR9LwMf-N0yUjXEQgRKKBLaAT3dacv44Q,5012
+paramiko/kex_gex.py,sha256=j5fPexu48CGObvpPKn0kZTjdn1onfz0iYhh8p8kIgM0,10320
+paramiko/kex_group1.py,sha256=HfzkLH1SKaIavnN-LGuF-lAMaAECB6Izj_TELhg4Omc,5740
+paramiko/kex_group14.py,sha256=AX7xrTCqMROrMQ_3Dp8WmLkNN8dTovhPjtWgaLLpRxs,1833
+paramiko/kex_group16.py,sha256=s7qB7tSDFkG5ztlg3mV958UVWnKgn1LIA-B2t-h1eX4,2288
+paramiko/kex_gss.py,sha256=BadM1nNN-ORDRuJmb93v0xBGQlce1n29lT4ihsnmY-4,24562
+paramiko/message.py,sha256=wHTWVU_Xgfq-djOOPVF5jAsE-XgADoH47G0iI5N69gY,9349
+paramiko/packet.py,sha256=CocYnZ2Vbz7VRo-6BGMhlRWro7FLIISpxTiYeoEsyaM,24314
+paramiko/pipe.py,sha256=cmWwOyMdys62IGLC9lDznwTu11xLg6wB9mV-60lr86A,3902
+paramiko/pkey.py,sha256=E3hegNR3eS16MMVGEW2v5f_5PBcKjNwqJ_by2HXvfdc,36719
+paramiko/primes.py,sha256=6Uv0fFsTmIJxInMqeNhryw9jrzvgNksKbA7ecBI0g5E,5107
+paramiko/proxy.py,sha256=I5XxN1aDren3Fw1f3SOoQLP4O9O7jeyey9meG6Og0q4,4648
+paramiko/rsakey.py,sha256=7xoDJvfcaZVVYRGlv8xamhO3zYvE-wI_Nd814L8TxzQ,7546
+paramiko/server.py,sha256=oNkI7t2gSMYIwLov5vl_BbHU-AwFC5LxP78YIXw7mq4,30457
+paramiko/sftp.py,sha256=pyZPnR0fv94YopfPDpslloTiYelu5GuM70cXUGOaKHM,6471
+paramiko/sftp_attr.py,sha256=AX-cG_FiPinftQQq8Ndo1Mc_bZz-AhXFQQpac-oV0wg,8258
+paramiko/sftp_client.py,sha256=e_zi6V233tjx3DH9TH7rRDKRO-TCZ_zyOkBw4sSRIjo,35855
+paramiko/sftp_file.py,sha256=NgVfDhxxURhFrEqniIJQgKQ6wlgCTgOVu5GwQczW_hk,21820
+paramiko/sftp_handle.py,sha256=ho-eyiEvhYHt-_VytznNzNeGktfaIsQX5l4bespWZAk,7424
+paramiko/sftp_server.py,sha256=yH-BgsYj7BuZNGn_EHpnLRPmoNGoYB9g_XxOlK4IcYA,19492
+paramiko/sftp_si.py,sha256=Uf90bFme6Jy6yl7k4jJ28IJboq6KiyPWLjXgP9DR6gk,12544
+paramiko/ssh_exception.py,sha256=F82_vTnKr3UF7ai8dTEv6PnqwVoREyk2c9_Bo3smsrg,7494
+paramiko/ssh_gss.py,sha256=BNhiDON1FOJB2P2VQUQHLYJ7RZhTbDjc7NPMqSNwH6Y,28713
+paramiko/transport.py,sha256=BuO3Ai0aaE61rQ5i_WZ7Y-ZYhJqsxIZl0bXDwi5pLKU,135414
+paramiko/util.py,sha256=7eEtwmxiST4Jj3HIqB7irz0SMofJlmy4yuYqda-rqPs,9494
+paramiko/win_openssh.py,sha256=DbWJT0hiE6UImAbMqehcGuVLDWIl-2rObe-AhaGuWpk,1918
+paramiko/win_pageant.py,sha256=i5TG472VzJKVnK08oxM4hK_qb9IzL_Fo96B8ouaxXHo,4177
diff --git a/lib/paramiko-4.0.0.dist-info/REQUESTED b/lib/paramiko-4.0.0.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/lib/paramiko-4.0.0.dist-info/WHEEL b/lib/paramiko-4.0.0.dist-info/WHEEL
new file mode 100644
index 0000000..e7fa31b
--- /dev/null
+++ b/lib/paramiko-4.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (80.9.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/lib/paramiko-4.0.0.dist-info/licenses/LICENSE b/lib/paramiko-4.0.0.dist-info/licenses/LICENSE
new file mode 100644
index 0000000..d12bef0
--- /dev/null
+++ b/lib/paramiko-4.0.0.dist-info/licenses/LICENSE
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/lib/paramiko-4.0.0.dist-info/top_level.txt b/lib/paramiko-4.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000..8608c1b
--- /dev/null
+++ b/lib/paramiko-4.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+paramiko
diff --git a/lib/paramiko/__init__.py b/lib/paramiko/__init__.py
new file mode 100644
index 0000000..92ff86f
--- /dev/null
+++ b/lib/paramiko/__init__.py
@@ -0,0 +1,120 @@
+# Copyright (C) 2003-2011 Robey Pointer <robeypointer@gmail.com>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from importlib import metadata
+
+__version__ = metadata.version("paramiko")
+
+# flake8: noqa
+from paramiko.transport import (
+ SecurityOptions,
+ ServiceRequestingTransport,
+ Transport,
+)
+from paramiko.client import (
+ AutoAddPolicy,
+ MissingHostKeyPolicy,
+ RejectPolicy,
+ SSHClient,
+ WarningPolicy,
+)
+from paramiko.auth_handler import AuthHandler
+from paramiko.auth_strategy import (
+ AuthFailure,
+ AuthStrategy,
+ AuthResult,
+ AuthSource,
+ InMemoryPrivateKey,
+ NoneAuth,
+ OnDiskPrivateKey,
+ Password,
+ PrivateKey,
+ SourceResult,
+)
+from paramiko.ssh_gss import GSSAuth, GSS_AUTH_AVAILABLE, GSS_EXCEPTIONS
+from paramiko.channel import (
+ Channel,
+ ChannelFile,
+ ChannelStderrFile,
+ ChannelStdinFile,
+)
+from paramiko.ssh_exception import (
+ AuthenticationException,
+ BadAuthenticationType,
+ BadHostKeyException,
+ ChannelException,
+ ConfigParseError,
+ CouldNotCanonicalize,
+ IncompatiblePeer,
+ MessageOrderError,
+ PasswordRequiredException,
+ ProxyCommandFailure,
+ SSHException,
+)
+from paramiko.server import ServerInterface, SubsystemHandler, InteractiveQuery
+from paramiko.rsakey import RSAKey
+from paramiko.ecdsakey import ECDSAKey
+from paramiko.ed25519key import Ed25519Key
+from paramiko.sftp import SFTPError, BaseSFTP
+from paramiko.sftp_client import SFTP, SFTPClient
+from paramiko.sftp_server import SFTPServer
+from paramiko.sftp_attr import SFTPAttributes
+from paramiko.sftp_handle import SFTPHandle
+from paramiko.sftp_si import SFTPServerInterface
+from paramiko.sftp_file import SFTPFile
+from paramiko.message import Message
+from paramiko.packet import Packetizer
+from paramiko.file import BufferedFile
+from paramiko.agent import Agent, AgentKey
+from paramiko.pkey import PKey, PublicBlob, UnknownKeyType
+from paramiko.hostkeys import HostKeys
+from paramiko.config import SSHConfig, SSHConfigDict
+from paramiko.proxy import ProxyCommand
+
+from paramiko.common import (
+ AUTH_SUCCESSFUL,
+ AUTH_PARTIALLY_SUCCESSFUL,
+ AUTH_FAILED,
+ OPEN_SUCCEEDED,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
+ OPEN_FAILED_CONNECT_FAILED,
+ OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
+ OPEN_FAILED_RESOURCE_SHORTAGE,
+)
+
+from paramiko.sftp import (
+ SFTP_OK,
+ SFTP_EOF,
+ SFTP_NO_SUCH_FILE,
+ SFTP_PERMISSION_DENIED,
+ SFTP_FAILURE,
+ SFTP_BAD_MESSAGE,
+ SFTP_NO_CONNECTION,
+ SFTP_CONNECTION_LOST,
+ SFTP_OP_UNSUPPORTED,
+)
+
+from paramiko.common import io_sleep
+
+
+# TODO: I guess a real plugin system might be nice for future expansion...
+key_classes = [RSAKey, Ed25519Key, ECDSAKey]
+
+
+__author__ = "Jeff Forcier <jeff@bitprophet.org>"
+__license__ = "GNU Lesser General Public License (LGPL)"
diff --git a/lib/paramiko/__pycache__/__init__.cpython-314.pyc b/lib/paramiko/__pycache__/__init__.cpython-314.pyc
new file mode 100644
index 0000000..32c0a22
Binary files /dev/null and b/lib/paramiko/__pycache__/__init__.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/_winapi.cpython-314.pyc b/lib/paramiko/__pycache__/_winapi.cpython-314.pyc
new file mode 100644
index 0000000..a9ce577
Binary files /dev/null and b/lib/paramiko/__pycache__/_winapi.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/agent.cpython-314.pyc b/lib/paramiko/__pycache__/agent.cpython-314.pyc
new file mode 100644
index 0000000..39e861e
Binary files /dev/null and b/lib/paramiko/__pycache__/agent.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/auth_handler.cpython-314.pyc b/lib/paramiko/__pycache__/auth_handler.cpython-314.pyc
new file mode 100644
index 0000000..97c4fb1
Binary files /dev/null and b/lib/paramiko/__pycache__/auth_handler.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/auth_strategy.cpython-314.pyc b/lib/paramiko/__pycache__/auth_strategy.cpython-314.pyc
new file mode 100644
index 0000000..cc7af7b
Binary files /dev/null and b/lib/paramiko/__pycache__/auth_strategy.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/ber.cpython-314.pyc b/lib/paramiko/__pycache__/ber.cpython-314.pyc
new file mode 100644
index 0000000..639c598
Binary files /dev/null and b/lib/paramiko/__pycache__/ber.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/buffered_pipe.cpython-314.pyc b/lib/paramiko/__pycache__/buffered_pipe.cpython-314.pyc
new file mode 100644
index 0000000..409a87b
Binary files /dev/null and b/lib/paramiko/__pycache__/buffered_pipe.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/channel.cpython-314.pyc b/lib/paramiko/__pycache__/channel.cpython-314.pyc
new file mode 100644
index 0000000..23a4338
Binary files /dev/null and b/lib/paramiko/__pycache__/channel.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/client.cpython-314.pyc b/lib/paramiko/__pycache__/client.cpython-314.pyc
new file mode 100644
index 0000000..ea5e818
Binary files /dev/null and b/lib/paramiko/__pycache__/client.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/common.cpython-314.pyc b/lib/paramiko/__pycache__/common.cpython-314.pyc
new file mode 100644
index 0000000..ce9de59
Binary files /dev/null and b/lib/paramiko/__pycache__/common.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/compress.cpython-314.pyc b/lib/paramiko/__pycache__/compress.cpython-314.pyc
new file mode 100644
index 0000000..65df4b7
Binary files /dev/null and b/lib/paramiko/__pycache__/compress.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/config.cpython-314.pyc b/lib/paramiko/__pycache__/config.cpython-314.pyc
new file mode 100644
index 0000000..ed978a9
Binary files /dev/null and b/lib/paramiko/__pycache__/config.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/ecdsakey.cpython-314.pyc b/lib/paramiko/__pycache__/ecdsakey.cpython-314.pyc
new file mode 100644
index 0000000..d0a43f4
Binary files /dev/null and b/lib/paramiko/__pycache__/ecdsakey.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/ed25519key.cpython-314.pyc b/lib/paramiko/__pycache__/ed25519key.cpython-314.pyc
new file mode 100644
index 0000000..5cbf994
Binary files /dev/null and b/lib/paramiko/__pycache__/ed25519key.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/file.cpython-314.pyc b/lib/paramiko/__pycache__/file.cpython-314.pyc
new file mode 100644
index 0000000..d3dbae0
Binary files /dev/null and b/lib/paramiko/__pycache__/file.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/hostkeys.cpython-314.pyc b/lib/paramiko/__pycache__/hostkeys.cpython-314.pyc
new file mode 100644
index 0000000..9f51384
Binary files /dev/null and b/lib/paramiko/__pycache__/hostkeys.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/kex_curve25519.cpython-314.pyc b/lib/paramiko/__pycache__/kex_curve25519.cpython-314.pyc
new file mode 100644
index 0000000..42ca82d
Binary files /dev/null and b/lib/paramiko/__pycache__/kex_curve25519.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/kex_ecdh_nist.cpython-314.pyc b/lib/paramiko/__pycache__/kex_ecdh_nist.cpython-314.pyc
new file mode 100644
index 0000000..881db99
Binary files /dev/null and b/lib/paramiko/__pycache__/kex_ecdh_nist.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/kex_gex.cpython-314.pyc b/lib/paramiko/__pycache__/kex_gex.cpython-314.pyc
new file mode 100644
index 0000000..c50b0b6
Binary files /dev/null and b/lib/paramiko/__pycache__/kex_gex.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/kex_group1.cpython-314.pyc b/lib/paramiko/__pycache__/kex_group1.cpython-314.pyc
new file mode 100644
index 0000000..375cd55
Binary files /dev/null and b/lib/paramiko/__pycache__/kex_group1.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/kex_group14.cpython-314.pyc b/lib/paramiko/__pycache__/kex_group14.cpython-314.pyc
new file mode 100644
index 0000000..919e64d
Binary files /dev/null and b/lib/paramiko/__pycache__/kex_group14.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/kex_group16.cpython-314.pyc b/lib/paramiko/__pycache__/kex_group16.cpython-314.pyc
new file mode 100644
index 0000000..a80cc9e
Binary files /dev/null and b/lib/paramiko/__pycache__/kex_group16.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/kex_gss.cpython-314.pyc b/lib/paramiko/__pycache__/kex_gss.cpython-314.pyc
new file mode 100644
index 0000000..19919ad
Binary files /dev/null and b/lib/paramiko/__pycache__/kex_gss.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/message.cpython-314.pyc b/lib/paramiko/__pycache__/message.cpython-314.pyc
new file mode 100644
index 0000000..13f496f
Binary files /dev/null and b/lib/paramiko/__pycache__/message.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/packet.cpython-314.pyc b/lib/paramiko/__pycache__/packet.cpython-314.pyc
new file mode 100644
index 0000000..cd912e2
Binary files /dev/null and b/lib/paramiko/__pycache__/packet.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/pipe.cpython-314.pyc b/lib/paramiko/__pycache__/pipe.cpython-314.pyc
new file mode 100644
index 0000000..c942c2e
Binary files /dev/null and b/lib/paramiko/__pycache__/pipe.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/pkey.cpython-314.pyc b/lib/paramiko/__pycache__/pkey.cpython-314.pyc
new file mode 100644
index 0000000..a3de1e0
Binary files /dev/null and b/lib/paramiko/__pycache__/pkey.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/primes.cpython-314.pyc b/lib/paramiko/__pycache__/primes.cpython-314.pyc
new file mode 100644
index 0000000..1f9a991
Binary files /dev/null and b/lib/paramiko/__pycache__/primes.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/proxy.cpython-314.pyc b/lib/paramiko/__pycache__/proxy.cpython-314.pyc
new file mode 100644
index 0000000..6a8d0a9
Binary files /dev/null and b/lib/paramiko/__pycache__/proxy.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/rsakey.cpython-314.pyc b/lib/paramiko/__pycache__/rsakey.cpython-314.pyc
new file mode 100644
index 0000000..dc45380
Binary files /dev/null and b/lib/paramiko/__pycache__/rsakey.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/server.cpython-314.pyc b/lib/paramiko/__pycache__/server.cpython-314.pyc
new file mode 100644
index 0000000..75b4985
Binary files /dev/null and b/lib/paramiko/__pycache__/server.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/sftp.cpython-314.pyc b/lib/paramiko/__pycache__/sftp.cpython-314.pyc
new file mode 100644
index 0000000..dfacc5f
Binary files /dev/null and b/lib/paramiko/__pycache__/sftp.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/sftp_attr.cpython-314.pyc b/lib/paramiko/__pycache__/sftp_attr.cpython-314.pyc
new file mode 100644
index 0000000..e596576
Binary files /dev/null and b/lib/paramiko/__pycache__/sftp_attr.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/sftp_client.cpython-314.pyc b/lib/paramiko/__pycache__/sftp_client.cpython-314.pyc
new file mode 100644
index 0000000..3f9e7aa
Binary files /dev/null and b/lib/paramiko/__pycache__/sftp_client.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/sftp_file.cpython-314.pyc b/lib/paramiko/__pycache__/sftp_file.cpython-314.pyc
new file mode 100644
index 0000000..dd7e83c
Binary files /dev/null and b/lib/paramiko/__pycache__/sftp_file.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/sftp_handle.cpython-314.pyc b/lib/paramiko/__pycache__/sftp_handle.cpython-314.pyc
new file mode 100644
index 0000000..80d7a6e
Binary files /dev/null and b/lib/paramiko/__pycache__/sftp_handle.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/sftp_server.cpython-314.pyc b/lib/paramiko/__pycache__/sftp_server.cpython-314.pyc
new file mode 100644
index 0000000..0a0e5a1
Binary files /dev/null and b/lib/paramiko/__pycache__/sftp_server.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/sftp_si.cpython-314.pyc b/lib/paramiko/__pycache__/sftp_si.cpython-314.pyc
new file mode 100644
index 0000000..3122e2d
Binary files /dev/null and b/lib/paramiko/__pycache__/sftp_si.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/ssh_exception.cpython-314.pyc b/lib/paramiko/__pycache__/ssh_exception.cpython-314.pyc
new file mode 100644
index 0000000..cdfe93d
Binary files /dev/null and b/lib/paramiko/__pycache__/ssh_exception.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/ssh_gss.cpython-314.pyc b/lib/paramiko/__pycache__/ssh_gss.cpython-314.pyc
new file mode 100644
index 0000000..ef1ccb6
Binary files /dev/null and b/lib/paramiko/__pycache__/ssh_gss.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/transport.cpython-314.pyc b/lib/paramiko/__pycache__/transport.cpython-314.pyc
new file mode 100644
index 0000000..324ddf2
Binary files /dev/null and b/lib/paramiko/__pycache__/transport.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/util.cpython-314.pyc b/lib/paramiko/__pycache__/util.cpython-314.pyc
new file mode 100644
index 0000000..8a0b0a1
Binary files /dev/null and b/lib/paramiko/__pycache__/util.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/win_openssh.cpython-314.pyc b/lib/paramiko/__pycache__/win_openssh.cpython-314.pyc
new file mode 100644
index 0000000..dfa0eb9
Binary files /dev/null and b/lib/paramiko/__pycache__/win_openssh.cpython-314.pyc differ
diff --git a/lib/paramiko/__pycache__/win_pageant.cpython-314.pyc b/lib/paramiko/__pycache__/win_pageant.cpython-314.pyc
new file mode 100644
index 0000000..06acccf
Binary files /dev/null and b/lib/paramiko/__pycache__/win_pageant.cpython-314.pyc differ
diff --git a/lib/paramiko/_winapi.py b/lib/paramiko/_winapi.py
new file mode 100644
index 0000000..4295457
--- /dev/null
+++ b/lib/paramiko/_winapi.py
@@ -0,0 +1,413 @@
+"""
+Windows API functions implemented as ctypes functions and classes as found
+in jaraco.windows (3.4.1).
+
+If you encounter issues with this module, please consider reporting the issues
+in jaraco.windows and asking the author to port the fixes back here.
+"""
+
+import builtins
+import ctypes.wintypes
+
+from paramiko.util import u
+
+
+######################
+# jaraco.windows.error
+
+
+def format_system_message(errno):
+ """
+ Call FormatMessage with a system error number to retrieve
+ the descriptive error message.
+ """
+ # first some flags used by FormatMessageW
+ ALLOCATE_BUFFER = 0x100
+ FROM_SYSTEM = 0x1000
+
+ # Let FormatMessageW allocate the buffer (we'll free it below)
+ # Also, let it know we want a system error message.
+ flags = ALLOCATE_BUFFER | FROM_SYSTEM
+ source = None
+ message_id = errno
+ language_id = 0
+ result_buffer = ctypes.wintypes.LPWSTR()
+ buffer_size = 0
+ arguments = None
+ bytes = ctypes.windll.kernel32.FormatMessageW(
+ flags,
+ source,
+ message_id,
+ language_id,
+ ctypes.byref(result_buffer),
+ buffer_size,
+ arguments,
+ )
+ # note the following will cause an infinite loop if GetLastError
+ # repeatedly returns an error that cannot be formatted, although
+ # this should not happen.
+ handle_nonzero_success(bytes)
+ message = result_buffer.value
+ ctypes.windll.kernel32.LocalFree(result_buffer)
+ return message
+
+
+class WindowsError(builtins.WindowsError):
+ """more info about errors at
+ http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx"""
+
+ def __init__(self, value=None):
+ if value is None:
+ value = ctypes.windll.kernel32.GetLastError()
+ strerror = format_system_message(value)
+ args = 0, strerror, None, value
+ super().__init__(*args)
+
+ @property
+ def message(self):
+ return self.strerror
+
+ @property
+ def code(self):
+ return self.winerror
+
+ def __str__(self):
+ return self.message
+
+ def __repr__(self):
+ return "{self.__class__.__name__}({self.winerror})".format(**vars())
+
+
+def handle_nonzero_success(result):
+ if result == 0:
+ raise WindowsError()
+
+
+###########################
+# jaraco.windows.api.memory
+
+GMEM_MOVEABLE = 0x2
+
+GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc
+GlobalAlloc.argtypes = ctypes.wintypes.UINT, ctypes.c_size_t
+GlobalAlloc.restype = ctypes.wintypes.HANDLE
+
+GlobalLock = ctypes.windll.kernel32.GlobalLock
+GlobalLock.argtypes = (ctypes.wintypes.HGLOBAL,)
+GlobalLock.restype = ctypes.wintypes.LPVOID
+
+GlobalUnlock = ctypes.windll.kernel32.GlobalUnlock
+GlobalUnlock.argtypes = (ctypes.wintypes.HGLOBAL,)
+GlobalUnlock.restype = ctypes.wintypes.BOOL
+
+GlobalSize = ctypes.windll.kernel32.GlobalSize
+GlobalSize.argtypes = (ctypes.wintypes.HGLOBAL,)
+GlobalSize.restype = ctypes.c_size_t
+
+CreateFileMapping = ctypes.windll.kernel32.CreateFileMappingW
+CreateFileMapping.argtypes = [
+ ctypes.wintypes.HANDLE,
+ ctypes.c_void_p,
+ ctypes.wintypes.DWORD,
+ ctypes.wintypes.DWORD,
+ ctypes.wintypes.DWORD,
+ ctypes.wintypes.LPWSTR,
+]
+CreateFileMapping.restype = ctypes.wintypes.HANDLE
+
+MapViewOfFile = ctypes.windll.kernel32.MapViewOfFile
+MapViewOfFile.restype = ctypes.wintypes.HANDLE
+
+UnmapViewOfFile = ctypes.windll.kernel32.UnmapViewOfFile
+UnmapViewOfFile.argtypes = (ctypes.wintypes.HANDLE,)
+
+RtlMoveMemory = ctypes.windll.kernel32.RtlMoveMemory
+RtlMoveMemory.argtypes = (ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)
+
+ctypes.windll.kernel32.LocalFree.argtypes = (ctypes.wintypes.HLOCAL,)
+
+#####################
+# jaraco.windows.mmap
+
+
+class MemoryMap:
+ """
+ A memory map object which can have security attributes overridden.
+ """
+
+ def __init__(self, name, length, security_attributes=None):
+ self.name = name
+ self.length = length
+ self.security_attributes = security_attributes
+ self.pos = 0
+
+ def __enter__(self):
+ p_SA = (
+ ctypes.byref(self.security_attributes)
+ if self.security_attributes
+ else None
+ )
+ INVALID_HANDLE_VALUE = -1
+ PAGE_READWRITE = 0x4
+ FILE_MAP_WRITE = 0x2
+ filemap = ctypes.windll.kernel32.CreateFileMappingW(
+ INVALID_HANDLE_VALUE,
+ p_SA,
+ PAGE_READWRITE,
+ 0,
+ self.length,
+ u(self.name),
+ )
+ handle_nonzero_success(filemap)
+ if filemap == INVALID_HANDLE_VALUE:
+ raise Exception("Failed to create file mapping")
+ self.filemap = filemap
+ self.view = MapViewOfFile(filemap, FILE_MAP_WRITE, 0, 0, 0)
+ return self
+
+ def seek(self, pos):
+ self.pos = pos
+
+ def write(self, msg):
+ assert isinstance(msg, bytes)
+ n = len(msg)
+ if self.pos + n >= self.length: # A little safety.
+ raise ValueError(f"Refusing to write {n} bytes")
+ dest = self.view + self.pos
+ length = ctypes.c_size_t(n)
+ ctypes.windll.kernel32.RtlMoveMemory(dest, msg, length)
+ self.pos += n
+
+ def read(self, n):
+ """
+ Read n bytes from mapped view.
+ """
+ out = ctypes.create_string_buffer(n)
+ source = self.view + self.pos
+ length = ctypes.c_size_t(n)
+ ctypes.windll.kernel32.RtlMoveMemory(out, source, length)
+ self.pos += n
+ return out.raw
+
+ def __exit__(self, exc_type, exc_val, tb):
+ ctypes.windll.kernel32.UnmapViewOfFile(self.view)
+ ctypes.windll.kernel32.CloseHandle(self.filemap)
+
+
+#############################
+# jaraco.windows.api.security
+
+# from WinNT.h
+READ_CONTROL = 0x00020000
+STANDARD_RIGHTS_REQUIRED = 0x000F0000
+STANDARD_RIGHTS_READ = READ_CONTROL
+STANDARD_RIGHTS_WRITE = READ_CONTROL
+STANDARD_RIGHTS_EXECUTE = READ_CONTROL
+STANDARD_RIGHTS_ALL = 0x001F0000
+
+# from NTSecAPI.h
+POLICY_VIEW_LOCAL_INFORMATION = 0x00000001
+POLICY_VIEW_AUDIT_INFORMATION = 0x00000002
+POLICY_GET_PRIVATE_INFORMATION = 0x00000004
+POLICY_TRUST_ADMIN = 0x00000008
+POLICY_CREATE_ACCOUNT = 0x00000010
+POLICY_CREATE_SECRET = 0x00000020
+POLICY_CREATE_PRIVILEGE = 0x00000040
+POLICY_SET_DEFAULT_QUOTA_LIMITS = 0x00000080
+POLICY_SET_AUDIT_REQUIREMENTS = 0x00000100
+POLICY_AUDIT_LOG_ADMIN = 0x00000200
+POLICY_SERVER_ADMIN = 0x00000400
+POLICY_LOOKUP_NAMES = 0x00000800
+POLICY_NOTIFICATION = 0x00001000
+
+POLICY_ALL_ACCESS = (
+ STANDARD_RIGHTS_REQUIRED
+ | POLICY_VIEW_LOCAL_INFORMATION
+ | POLICY_VIEW_AUDIT_INFORMATION
+ | POLICY_GET_PRIVATE_INFORMATION
+ | POLICY_TRUST_ADMIN
+ | POLICY_CREATE_ACCOUNT
+ | POLICY_CREATE_SECRET
+ | POLICY_CREATE_PRIVILEGE
+ | POLICY_SET_DEFAULT_QUOTA_LIMITS
+ | POLICY_SET_AUDIT_REQUIREMENTS
+ | POLICY_AUDIT_LOG_ADMIN
+ | POLICY_SERVER_ADMIN
+ | POLICY_LOOKUP_NAMES
+)
+
+
+POLICY_READ = (
+ STANDARD_RIGHTS_READ
+ | POLICY_VIEW_AUDIT_INFORMATION
+ | POLICY_GET_PRIVATE_INFORMATION
+)
+
+POLICY_WRITE = (
+ STANDARD_RIGHTS_WRITE
+ | POLICY_TRUST_ADMIN
+ | POLICY_CREATE_ACCOUNT
+ | POLICY_CREATE_SECRET
+ | POLICY_CREATE_PRIVILEGE
+ | POLICY_SET_DEFAULT_QUOTA_LIMITS
+ | POLICY_SET_AUDIT_REQUIREMENTS
+ | POLICY_AUDIT_LOG_ADMIN
+ | POLICY_SERVER_ADMIN
+)
+
+POLICY_EXECUTE = (
+ STANDARD_RIGHTS_EXECUTE
+ | POLICY_VIEW_LOCAL_INFORMATION
+ | POLICY_LOOKUP_NAMES
+)
+
+
+class TokenAccess:
+ TOKEN_QUERY = 0x8
+
+
+class TokenInformationClass:
+ TokenUser = 1
+
+
+class TOKEN_USER(ctypes.Structure):
+ num = 1
+ _fields_ = [
+ ("SID", ctypes.c_void_p),
+ ("ATTRIBUTES", ctypes.wintypes.DWORD),
+ ]
+
+
+class SECURITY_DESCRIPTOR(ctypes.Structure):
+ """
+ typedef struct _SECURITY_DESCRIPTOR
+ {
+ UCHAR Revision;
+ UCHAR Sbz1;
+ SECURITY_DESCRIPTOR_CONTROL Control;
+ PSID Owner;
+ PSID Group;
+ PACL Sacl;
+ PACL Dacl;
+ } SECURITY_DESCRIPTOR;
+ """
+
+ SECURITY_DESCRIPTOR_CONTROL = ctypes.wintypes.USHORT
+ REVISION = 1
+
+ _fields_ = [
+ ("Revision", ctypes.c_ubyte),
+ ("Sbz1", ctypes.c_ubyte),
+ ("Control", SECURITY_DESCRIPTOR_CONTROL),
+ ("Owner", ctypes.c_void_p),
+ ("Group", ctypes.c_void_p),
+ ("Sacl", ctypes.c_void_p),
+ ("Dacl", ctypes.c_void_p),
+ ]
+
+
+class SECURITY_ATTRIBUTES(ctypes.Structure):
+ """
+ typedef struct _SECURITY_ATTRIBUTES {
+ DWORD nLength;
+ LPVOID lpSecurityDescriptor;
+ BOOL bInheritHandle;
+ } SECURITY_ATTRIBUTES;
+ """
+
+ _fields_ = [
+ ("nLength", ctypes.wintypes.DWORD),
+ ("lpSecurityDescriptor", ctypes.c_void_p),
+ ("bInheritHandle", ctypes.wintypes.BOOL),
+ ]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.nLength = ctypes.sizeof(SECURITY_ATTRIBUTES)
+
+ @property
+ def descriptor(self):
+ return self._descriptor
+
+ @descriptor.setter
+ def descriptor(self, value):
+ self._descriptor = value
+ self.lpSecurityDescriptor = ctypes.addressof(value)
+
+
+ctypes.windll.advapi32.SetSecurityDescriptorOwner.argtypes = (
+ ctypes.POINTER(SECURITY_DESCRIPTOR),
+ ctypes.c_void_p,
+ ctypes.wintypes.BOOL,
+)
+
+#########################
+# jaraco.windows.security
+
+
+def GetTokenInformation(token, information_class):
+ """
+ Given a token, get the token information for it.
+ """
+ data_size = ctypes.wintypes.DWORD()
+ ctypes.windll.advapi32.GetTokenInformation(
+ token, information_class.num, 0, 0, ctypes.byref(data_size)
+ )
+ data = ctypes.create_string_buffer(data_size.value)
+ handle_nonzero_success(
+ ctypes.windll.advapi32.GetTokenInformation(
+ token,
+ information_class.num,
+ ctypes.byref(data),
+ ctypes.sizeof(data),
+ ctypes.byref(data_size),
+ )
+ )
+ return ctypes.cast(data, ctypes.POINTER(TOKEN_USER)).contents
+
+
+def OpenProcessToken(proc_handle, access):
+ result = ctypes.wintypes.HANDLE()
+ proc_handle = ctypes.wintypes.HANDLE(proc_handle)
+ handle_nonzero_success(
+ ctypes.windll.advapi32.OpenProcessToken(
+ proc_handle, access, ctypes.byref(result)
+ )
+ )
+ return result
+
+
+def get_current_user():
+ """
+ Return a TOKEN_USER for the owner of this process.
+ """
+ process = OpenProcessToken(
+ ctypes.windll.kernel32.GetCurrentProcess(), TokenAccess.TOKEN_QUERY
+ )
+ return GetTokenInformation(process, TOKEN_USER)
+
+
+def get_security_attributes_for_user(user=None):
+ """
+ Return a SECURITY_ATTRIBUTES structure with the SID set to the
+ specified user (uses current user if none is specified).
+ """
+ if user is None:
+ user = get_current_user()
+
+ assert isinstance(user, TOKEN_USER), "user must be TOKEN_USER instance"
+
+ SD = SECURITY_DESCRIPTOR()
+ SA = SECURITY_ATTRIBUTES()
+ # by attaching the actual security descriptor, it will be garbage-
+ # collected with the security attributes
+ SA.descriptor = SD
+ SA.bInheritHandle = 1
+
+ ctypes.windll.advapi32.InitializeSecurityDescriptor(
+ ctypes.byref(SD), SECURITY_DESCRIPTOR.REVISION
+ )
+ ctypes.windll.advapi32.SetSecurityDescriptorOwner(
+ ctypes.byref(SD), user.SID, 0
+ )
+ return SA
diff --git a/lib/paramiko/agent.py b/lib/paramiko/agent.py
new file mode 100644
index 0000000..b29a0d1
--- /dev/null
+++ b/lib/paramiko/agent.py
@@ -0,0 +1,497 @@
+# Copyright (C) 2003-2007 John Rochester <john@jrochester.org>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+SSH Agent interface
+"""
+
+import os
+import socket
+import struct
+import sys
+import threading
+import time
+import tempfile
+import stat
+from logging import DEBUG
+from select import select
+from paramiko.common import io_sleep, byte_chr
+
+from paramiko.ssh_exception import SSHException, AuthenticationException
+from paramiko.message import Message
+from paramiko.pkey import PKey, UnknownKeyType
+from paramiko.util import asbytes, get_logger
+
+cSSH2_AGENTC_REQUEST_IDENTITIES = byte_chr(11)
+SSH2_AGENT_IDENTITIES_ANSWER = 12
+cSSH2_AGENTC_SIGN_REQUEST = byte_chr(13)
+SSH2_AGENT_SIGN_RESPONSE = 14
+
+SSH_AGENT_RSA_SHA2_256 = 2
+SSH_AGENT_RSA_SHA2_512 = 4
+# NOTE: RFC mildly confusing; while these flags are OR'd together, OpenSSH at
+# least really treats them like "AND"s, in the sense that if it finds the
+# SHA256 flag set it won't continue looking at the SHA512 one; it
+# short-circuits right away.
+# Thus, we never want to eg submit 6 to say "either's good".
+ALGORITHM_FLAG_MAP = {
+ "rsa-sha2-256": SSH_AGENT_RSA_SHA2_256,
+ "rsa-sha2-512": SSH_AGENT_RSA_SHA2_512,
+}
+for key, value in list(ALGORITHM_FLAG_MAP.items()):
+ ALGORITHM_FLAG_MAP[f"{key}-cert-v01@openssh.com"] = value
+
+
+# TODO 4.0: rename all these - including making some of their methods public?
+class AgentSSH:
+ def __init__(self):
+ self._conn = None
+ self._keys = ()
+
+ def get_keys(self):
+ """
+ Return the list of keys available through the SSH agent, if any. If
+ no SSH agent was running (or it couldn't be contacted), an empty list
+ will be returned.
+
+ This method performs no IO, just returns the list of keys retrieved
+ when the connection was made.
+
+ :return:
+ a tuple of `.AgentKey` objects representing keys available on the
+ SSH agent
+ """
+ return self._keys
+
+ def _connect(self, conn):
+ self._conn = conn
+ ptype, result = self._send_message(cSSH2_AGENTC_REQUEST_IDENTITIES)
+ if ptype != SSH2_AGENT_IDENTITIES_ANSWER:
+ raise SSHException("could not get keys from ssh-agent")
+ keys = []
+ for i in range(result.get_int()):
+ keys.append(
+ AgentKey(
+ agent=self,
+ blob=result.get_binary(),
+ comment=result.get_text(),
+ )
+ )
+ self._keys = tuple(keys)
+
+ def _close(self):
+ if self._conn is not None:
+ self._conn.close()
+ self._conn = None
+ self._keys = ()
+
+ def _send_message(self, msg):
+ msg = asbytes(msg)
+ self._conn.send(struct.pack(">I", len(msg)) + msg)
+ data = self._read_all(4)
+ msg = Message(self._read_all(struct.unpack(">I", data)[0]))
+ return ord(msg.get_byte()), msg
+
+ def _read_all(self, wanted):
+ result = self._conn.recv(wanted)
+ while len(result) < wanted:
+ if len(result) == 0:
+ raise SSHException("lost ssh-agent")
+ extra = self._conn.recv(wanted - len(result))
+ if len(extra) == 0:
+ raise SSHException("lost ssh-agent")
+ result += extra
+ return result
+
+
+class AgentProxyThread(threading.Thread):
+ """
+ Class in charge of communication between two channels.
+ """
+
+ def __init__(self, agent):
+ threading.Thread.__init__(self, target=self.run)
+ self._agent = agent
+ self._exit = False
+
+ def run(self):
+ try:
+ (r, addr) = self.get_connection()
+ # Found that r should be either
+ # a socket from the socket library or None
+ self.__inr = r
+ # addr is expected to be an IP address string, or None
+ self.__addr = addr
+ self._agent.connect()
+ if not isinstance(self._agent, int) and (
+ self._agent._conn is None
+ or not hasattr(self._agent._conn, "fileno")
+ ):
+ raise AuthenticationException("Unable to connect to SSH agent")
+ self._communicate()
+ except:
+ # XXX Not sure what to do here ... raise or pass ?
+ raise
+
+ def _communicate(self):
+ import fcntl
+
+ oldflags = fcntl.fcntl(self.__inr, fcntl.F_GETFL)
+ fcntl.fcntl(self.__inr, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
+ while not self._exit:
+ events = select([self._agent._conn, self.__inr], [], [], 0.5)
+ for fd in events[0]:
+ if self._agent._conn == fd:
+ data = self._agent._conn.recv(512)
+ if len(data) != 0:
+ self.__inr.send(data)
+ else:
+ self._close()
+ break
+ elif self.__inr == fd:
+ data = self.__inr.recv(512)
+ if len(data) != 0:
+ self._agent._conn.send(data)
+ else:
+ self._close()
+ break
+ time.sleep(io_sleep)
+
+ def _close(self):
+ self._exit = True
+ self.__inr.close()
+ self._agent._conn.close()
+
+
+class AgentLocalProxy(AgentProxyThread):
+ """
+ Class to be used when wanting to ask a local SSH Agent being
+ asked from a remote fake agent (so use a unix socket for ex.)
+ """
+
+ def __init__(self, agent):
+ AgentProxyThread.__init__(self, agent)
+
+ def get_connection(self):
+ """
+ Return a pair of socket object and string address.
+
+ May block!
+ """
+ conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ conn.bind(self._agent._get_filename())
+ conn.listen(1)
+ (r, addr) = conn.accept()
+ return r, addr
+ except:
+ raise
+
+
+class AgentRemoteProxy(AgentProxyThread):
+ """
+ Class to be used when wanting to ask a remote SSH Agent
+ """
+
+ def __init__(self, agent, chan):
+ AgentProxyThread.__init__(self, agent)
+ self.__chan = chan
+
+ def get_connection(self):
+ return self.__chan, None
+
+
+def get_agent_connection():
+ """
+ Returns some SSH agent object, or None if none were found/supported.
+
+ .. versionadded:: 2.10
+ """
+ if ("SSH_AUTH_SOCK" in os.environ) and (sys.platform != "win32"):
+ conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ conn.connect(os.environ["SSH_AUTH_SOCK"])
+ return conn
+ except:
+ # probably a dangling env var: the ssh agent is gone
+ return
+ elif sys.platform == "win32":
+ from . import win_pageant, win_openssh
+
+ conn = None
+ if win_pageant.can_talk_to_agent():
+ conn = win_pageant.PageantConnection()
+ elif win_openssh.can_talk_to_agent():
+ conn = win_openssh.OpenSSHAgentConnection()
+ return conn
+ else:
+ # no agent support
+ return
+
+
class AgentClientProxy:
    """
    Class proxying requests as a client:

    #. client asks for a request_forward_agent()
    #. server creates a proxy and a fake SSH Agent
    #. server asks for establishing a connection when needed,
       calling the forward_agent_handler at client side.
    #. the forward_agent_handler launches a thread for connecting
       the remote fake agent and the local agent
    #. Communication occurs ...
    """

    def __init__(self, chanRemote):
        # Connection to the local agent; filled in lazily by connect().
        self._conn = None
        # Channel to the remote (server-side) fake agent.
        self.__chanR = chanRemote
        # Worker thread shuttling bytes between the channel and the local
        # agent; its run loop invokes self.connect().
        self.thread = AgentRemoteProxy(self, chanRemote)
        self.thread.start()

    def __del__(self):
        # Best-effort cleanup if the caller forgot to close() explicitly.
        self.close()

    def connect(self):
        """
        Method automatically called by ``AgentProxyThread.run``.
        """
        conn = get_agent_connection()
        if not conn:
            # No local agent available; leave self._conn as None.
            return
        self._conn = conn

    def close(self):
        """
        Close the current connection and terminate the agent
        Should be called manually
        """
        # hasattr() guard: __init__ may have raised before self.thread
        # existed, and __del__ still calls close().
        if hasattr(self, "thread"):
            self.thread._exit = True
            self.thread.join(1000)
        if self._conn is not None:
            self._conn.close()
+
+
class AgentServerProxy(AgentSSH):
    """
    Allows an SSH server to access a forwarded agent.

    This also creates a unix domain socket on the system to allow external
    programs to also access the agent. For this reason, you probably only want
    to create one of these.

    :meth:`connect` must be called before it is usable. This will also load the
    list of keys the agent contains. You must also call :meth:`close` in
    order to clean up the unix socket and the thread that maintains it.
    (:class:`contextlib.closing` might be helpful to you.)

    :param .Transport t: Transport used for SSH Agent communication forwarding

    :raises: `.SSHException` -- mostly if we lost the agent
    """

    def __init__(self, t):
        AgentSSH.__init__(self)
        self.__t = t
        # Private, owner-only temp directory holding the proxy's unix socket.
        self._dir = tempfile.mkdtemp("sshproxy")
        os.chmod(self._dir, stat.S_IRWXU)
        self._file = self._dir + "/sshproxy.ssh"
        # Thread that listens on the unix socket and proxies to the agent.
        self.thread = AgentLocalProxy(self)
        self.thread.start()

    def __del__(self):
        # Best-effort cleanup; close() should normally be called explicitly.
        self.close()

    def connect(self):
        # Open a forwarded-agent channel over the transport; the remote side
        # may no longer have an agent for us, in which case we get None back.
        conn_sock = self.__t.open_forward_agent_channel()
        if conn_sock is None:
            raise SSHException("lost ssh-agent")
        conn_sock.set_name("auth-agent")
        self._connect(conn_sock)

    def close(self):
        """
        Terminate the agent, clean the files, close connections
        Should be called manually
        """
        # NOTE(review): os.remove/os.rmdir raise if close() runs twice or
        # before the socket file exists -- confirm callers invoke this
        # exactly once.
        os.remove(self._file)
        os.rmdir(self._dir)
        self.thread._exit = True
        self.thread.join(1000)
        self._close()

    def get_env(self):
        """
        Helper for the environment under unix

        :return:
            a dict containing the ``SSH_AUTH_SOCK`` environment variables
        """
        return {"SSH_AUTH_SOCK": self._get_filename()}

    def _get_filename(self):
        # Path of the unix socket external programs should connect to.
        return self._file
+
+
class AgentRequestHandler:
    """
    Primary/default implementation of SSH agent forwarding functionality.

    Simply instantiate this class, handing it a live command-executing session
    object, and it will handle forwarding any local SSH agent processes it
    finds.

    For example::

        # Connect
        client = SSHClient()
        client.connect(host, port, username)
        # Obtain session
        session = client.get_transport().open_session()
        # Forward local agent
        AgentRequestHandler(session)
        # Commands executed after this point will see the forwarded agent on
        # the remote end.
        session.exec_command("git clone https://my.git.repository/")
    """

    def __init__(self, chanClient):
        self._conn = None
        self.__chanC = chanClient
        # Initialize the proxy list *before* registering the handler: the
        # callback may fire as soon as it is installed, and it appends to
        # this list (previously the list was assigned after registration,
        # a startup race).
        self.__clientProxys = []
        chanClient.request_forward_agent(self._forward_agent_handler)

    def _forward_agent_handler(self, chanRemote):
        # One AgentClientProxy per forwarded-agent channel the server opens.
        self.__clientProxys.append(AgentClientProxy(chanRemote))

    def __del__(self):
        self.close()

    def close(self):
        # Shut down every proxy we spawned.
        for p in self.__clientProxys:
            p.close()
+
+
class Agent(AgentSSH):
    """
    Client interface for using private keys from an SSH agent running on the
    local machine. If an SSH agent is running, this class can be used to
    connect to it and retrieve `.PKey` objects which can be used when
    attempting to authenticate to remote SSH servers.

    Upon initialization, a session with the local machine's SSH agent is
    opened, if one is running. If no agent is running, initialization will
    succeed, but `get_keys` will return an empty tuple.

    :raises: `.SSHException` --
        if an SSH agent is found, but speaks an incompatible protocol

    .. versionchanged:: 2.10
        Added support for native openssh agent on windows (extending previous
        putty pageant support)
    """

    def __init__(self):
        AgentSSH.__init__(self)

        conn = get_agent_connection()
        if not conn:
            # No agent found (or unsupported platform): stay usable but
            # empty, per the class docstring.
            return
        self._connect(conn)

    def close(self):
        """
        Close the SSH agent connection.
        """
        self._close()
+
+
class AgentKey(PKey):
    """
    Private key held in a local SSH agent. This type of key can be used for
    authenticating to a remote server (signing). Most other key operations
    work as expected.

    .. versionchanged:: 3.2
        Added the ``comment`` kwarg and attribute.

    .. versionchanged:: 3.2
        Added the ``.inner_key`` attribute holding a reference to the 'real'
        key instance this key is a proxy for, if one was obtainable, else None.
    """

    def __init__(self, agent, blob, comment=""):
        self.agent = agent
        self.blob = blob
        self.comment = comment
        # The first field of the wire blob is the key type string
        # (e.g. "ssh-rsa").
        msg = Message(blob)
        self.name = msg.get_text()
        self._logger = get_logger(__file__)
        self.inner_key = None
        try:
            self.inner_key = PKey.from_type_string(
                key_type=self.name, key_bytes=blob
            )
        except UnknownKeyType:
            # Log, but don't explode, since inner_key is a best-effort thing.
            err = "Unable to derive inner_key for agent key of type {!r}"
            self.log(DEBUG, err.format(self.name))

    def log(self, *args, **kwargs):
        # Convenience passthrough to our logger instance.
        return self._logger.log(*args, **kwargs)

    def asbytes(self):
        # Prefer inner_key.asbytes, since that will differ for eg RSA-CERT
        return self.inner_key.asbytes() if self.inner_key else self.blob

    def get_name(self):
        # Key algorithm name, as parsed from the blob in __init__.
        return self.name

    def get_bits(self):
        # Have to work around PKey's default get_bits being crap
        if self.inner_key is not None:
            return self.inner_key.get_bits()
        return super().get_bits()

    def __getattr__(self, name):
        """
        Proxy any un-implemented methods/properties to the inner_key.
        """
        if self.inner_key is None:  # nothing to proxy to
            raise AttributeError(name)
        return getattr(self.inner_key, name)

    @property
    def _fields(self):
        # Fall back to (type name, raw blob) when no inner_key could be
        # derived in __init__.
        fallback = [self.get_name(), self.blob]
        return self.inner_key._fields if self.inner_key else fallback

    def sign_ssh_data(self, data, algorithm=None):
        """
        Ask the agent to sign ``data`` with this key.

        :raises SSHException: if the agent refuses to sign with this key.
        """
        msg = Message()
        msg.add_byte(cSSH2_AGENTC_SIGN_REQUEST)
        # NOTE: this used to be just self.blob, which is not entirely right for
        # RSA-CERT 'keys' - those end up always degrading to ssh-rsa type
        # signatures, for reasons probably internal to OpenSSH's agent code,
        # even if everything else wants SHA2 (including our flag map).
        msg.add_string(self.asbytes())
        msg.add_string(data)
        msg.add_int(ALGORITHM_FLAG_MAP.get(algorithm, 0))
        ptype, result = self.agent._send_message(msg)
        if ptype != SSH2_AGENT_SIGN_RESPONSE:
            raise SSHException("key cannot be used for signing")
        return result.get_binary()
diff --git a/lib/paramiko/auth_handler.py b/lib/paramiko/auth_handler.py
new file mode 100644
index 0000000..bc7f298
--- /dev/null
+++ b/lib/paramiko/auth_handler.py
@@ -0,0 +1,1092 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+`.AuthHandler`
+"""
+
+import weakref
+import threading
+import time
+import re
+
+from paramiko.common import (
+ cMSG_SERVICE_REQUEST,
+ cMSG_DISCONNECT,
+ DISCONNECT_SERVICE_NOT_AVAILABLE,
+ DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
+ cMSG_USERAUTH_REQUEST,
+ cMSG_SERVICE_ACCEPT,
+ DEBUG,
+ AUTH_SUCCESSFUL,
+ INFO,
+ cMSG_USERAUTH_SUCCESS,
+ cMSG_USERAUTH_FAILURE,
+ AUTH_PARTIALLY_SUCCESSFUL,
+ cMSG_USERAUTH_INFO_REQUEST,
+ WARNING,
+ AUTH_FAILED,
+ cMSG_USERAUTH_PK_OK,
+ cMSG_USERAUTH_INFO_RESPONSE,
+ MSG_SERVICE_REQUEST,
+ MSG_SERVICE_ACCEPT,
+ MSG_USERAUTH_REQUEST,
+ MSG_USERAUTH_SUCCESS,
+ MSG_USERAUTH_FAILURE,
+ MSG_USERAUTH_BANNER,
+ MSG_USERAUTH_INFO_REQUEST,
+ MSG_USERAUTH_INFO_RESPONSE,
+ cMSG_USERAUTH_GSSAPI_RESPONSE,
+ cMSG_USERAUTH_GSSAPI_TOKEN,
+ cMSG_USERAUTH_GSSAPI_MIC,
+ MSG_USERAUTH_GSSAPI_RESPONSE,
+ MSG_USERAUTH_GSSAPI_TOKEN,
+ MSG_USERAUTH_GSSAPI_ERROR,
+ MSG_USERAUTH_GSSAPI_ERRTOK,
+ MSG_USERAUTH_GSSAPI_MIC,
+ MSG_NAMES,
+ cMSG_USERAUTH_BANNER,
+)
+from paramiko.message import Message
+from paramiko.util import b, u
+from paramiko.ssh_exception import (
+ SSHException,
+ AuthenticationException,
+ BadAuthenticationType,
+ PartialAuthentication,
+)
+from paramiko.server import InteractiveQuery
+from paramiko.ssh_gss import GSSAuth, GSS_EXCEPTIONS
+
+
+class AuthHandler:
+ """
+ Internal class to handle the mechanics of authentication.
+ """
+
    def __init__(self, transport):
        # weakref.proxy avoids a Transport <-> AuthHandler reference cycle.
        self.transport = weakref.proxy(transport)
        self.username = None
        self.authenticated = False
        # Event set when an auth round-trip completes (see abort()).
        self.auth_event = None
        # Primed by one of the auth_* entry points before _request_auth().
        self.auth_method = ""
        self.banner = None
        self.password = None
        self.private_key = None
        self.interactive_handler = None
        self.submethods = None
        # for server mode:
        self.auth_username = None
        self.auth_fail_count = 0
        # for GSSAPI
        self.gss_host = None
        self.gss_deleg_creds = True
+
+ def _log(self, *args):
+ return self.transport._log(*args)
+
+ def is_authenticated(self):
+ return self.authenticated
+
+ def get_username(self):
+ if self.transport.server_mode:
+ return self.auth_username
+ else:
+ return self.username
+
+ def auth_none(self, username, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "none"
+ self.username = username
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_publickey(self, username, key, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "publickey"
+ self.username = username
+ self.private_key = key
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_password(self, username, password, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "password"
+ self.username = username
+ self.password = password
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_interactive(self, username, handler, event, submethods=""):
+ """
+ response_list = handler(title, instructions, prompt_list)
+ """
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "keyboard-interactive"
+ self.username = username
+ self.interactive_handler = handler
+ self.submethods = submethods
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "gssapi-with-mic"
+ self.username = username
+ self.gss_host = gss_host
+ self.gss_deleg_creds = gss_deleg_creds
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def auth_gssapi_keyex(self, username, event):
+ self.transport.lock.acquire()
+ try:
+ self.auth_event = event
+ self.auth_method = "gssapi-keyex"
+ self.username = username
+ self._request_auth()
+ finally:
+ self.transport.lock.release()
+
+ def abort(self):
+ if self.auth_event is not None:
+ self.auth_event.set()
+
+ # ...internals...
+
+ def _request_auth(self):
+ m = Message()
+ m.add_byte(cMSG_SERVICE_REQUEST)
+ m.add_string("ssh-userauth")
+ self.transport._send_message(m)
+
+ def _disconnect_service_not_available(self):
+ m = Message()
+ m.add_byte(cMSG_DISCONNECT)
+ m.add_int(DISCONNECT_SERVICE_NOT_AVAILABLE)
+ m.add_string("Service not available")
+ m.add_string("en")
+ self.transport._send_message(m)
+ self.transport.close()
+
+ def _disconnect_no_more_auth(self):
+ m = Message()
+ m.add_byte(cMSG_DISCONNECT)
+ m.add_int(DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE)
+ m.add_string("No more auth methods available")
+ m.add_string("en")
+ self.transport._send_message(m)
+ self.transport.close()
+
+ def _get_key_type_and_bits(self, key):
+ """
+ Given any key, return its type/algorithm & bits-to-sign.
+
+ Intended for input to or verification of, key signatures.
+ """
+ # Use certificate contents, if available, plain pubkey otherwise
+ if key.public_blob:
+ return key.public_blob.key_type, key.public_blob.key_blob
+ else:
+ return key.get_name(), key
+
+ def _get_session_blob(self, key, service, username, algorithm):
+ m = Message()
+ m.add_string(self.transport.session_id)
+ m.add_byte(cMSG_USERAUTH_REQUEST)
+ m.add_string(username)
+ m.add_string(service)
+ m.add_string("publickey")
+ m.add_boolean(True)
+ _, bits = self._get_key_type_and_bits(key)
+ m.add_string(algorithm)
+ m.add_string(bits)
+ return m.asbytes()
+
    def wait_for_response(self, event):
        """
        Block until the server answers the current auth attempt, the
        transport dies, or the auth timeout expires.

        :returns: list of allowed auth types on partial success, else ``[]``.
        :raises AuthenticationException: on failure, timeout, or dead
            transport.
        """
        max_ts = None
        if self.transport.auth_timeout is not None:
            max_ts = time.time() + self.transport.auth_timeout
        while True:
            # Poll in 100ms slices so transport death is noticed promptly.
            event.wait(0.1)
            if not self.transport.is_active():
                e = self.transport.get_exception()
                # Treat EOF (or no recorded error) as a generic auth failure.
                if (e is None) or issubclass(e.__class__, EOFError):
                    e = AuthenticationException(
                        "Authentication failed: transport shut down or saw EOF"
                    )
                raise e
            if event.is_set():
                break
            if max_ts is not None and max_ts <= time.time():
                raise AuthenticationException("Authentication timeout.")

        if not self.is_authenticated():
            e = self.transport.get_exception()
            if e is None:
                e = AuthenticationException("Authentication failed.")
            # this is horrible. Python Exception isn't yet descended from
            # object, so type(e) won't work. :(
            # TODO 4.0: lol. just lmao.
            if issubclass(e.__class__, PartialAuthentication):
                return e.allowed_types
            raise e
        return []
+
+ def _parse_service_request(self, m):
+ service = m.get_text()
+ if self.transport.server_mode and (service == "ssh-userauth"):
+ # accepted
+ m = Message()
+ m.add_byte(cMSG_SERVICE_ACCEPT)
+ m.add_string(service)
+ self.transport._send_message(m)
+ banner, language = self.transport.server_object.get_banner()
+ if banner:
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_BANNER)
+ m.add_string(banner)
+ m.add_string(language)
+ self.transport._send_message(m)
+ return
+ # dunno this one
+ self._disconnect_service_not_available()
+
+ def _generate_key_from_request(self, algorithm, keyblob):
+ # For use in server mode.
+ options = self.transport.preferred_pubkeys
+ if algorithm.replace("-cert-v01@openssh.com", "") not in options:
+ err = (
+ "Auth rejected: pubkey algorithm '{}' unsupported or disabled"
+ )
+ self._log(INFO, err.format(algorithm))
+ return None
+ return self.transport._key_info[algorithm](Message(keyblob))
+
+ def _choose_fallback_pubkey_algorithm(self, key_type, my_algos):
+ # Fallback: first one in our (possibly tweaked by caller) list
+ pubkey_algo = my_algos[0]
+ msg = "Server did not send a server-sig-algs list; defaulting to our first preferred algo ({!r})" # noqa
+ self._log(DEBUG, msg.format(pubkey_algo))
+ self._log(
+ DEBUG,
+ "NOTE: you may use the 'disabled_algorithms' SSHClient/Transport init kwarg to disable that or other algorithms if your server does not support them!", # noqa
+ )
+ return pubkey_algo
+
+ def _finalize_pubkey_algorithm(self, key_type):
+ # Short-circuit for non-RSA keys
+ if "rsa" not in key_type:
+ return key_type
+ self._log(
+ DEBUG,
+ "Finalizing pubkey algorithm for key of type {!r}".format(
+ key_type
+ ),
+ )
+ # NOTE re #2017: When the key is an RSA cert and the remote server is
+ # OpenSSH 7.7 or earlier, always use ssh-rsa-cert-v01@openssh.com.
+ # Those versions of the server won't support rsa-sha2 family sig algos
+ # for certs specifically, and in tandem with various server bugs
+ # regarding server-sig-algs, it's impossible to fit this into the rest
+ # of the logic here.
+ if key_type.endswith("-cert-v01@openssh.com") and re.search(
+ r"-OpenSSH_(?:[1-6]|7\.[0-7])", self.transport.remote_version
+ ):
+ pubkey_algo = "ssh-rsa-cert-v01@openssh.com"
+ self.transport._agreed_pubkey_algorithm = pubkey_algo
+ self._log(DEBUG, "OpenSSH<7.8 + RSA cert = forcing ssh-rsa!")
+ self._log(
+ DEBUG, "Agreed upon {!r} pubkey algorithm".format(pubkey_algo)
+ )
+ return pubkey_algo
+ # Normal attempts to handshake follow from here.
+ # Only consider RSA algos from our list, lest we agree on another!
+ my_algos = [x for x in self.transport.preferred_pubkeys if "rsa" in x]
+ self._log(DEBUG, "Our pubkey algorithm list: {}".format(my_algos))
+ # Short-circuit negatively if user disabled all RSA algos (heh)
+ if not my_algos:
+ raise SSHException(
+ "An RSA key was specified, but no RSA pubkey algorithms are configured!" # noqa
+ )
+ # Check for server-sig-algs if supported & sent
+ server_algo_str = u(
+ self.transport.server_extensions.get("server-sig-algs", b(""))
+ )
+ pubkey_algo = None
+ # Prefer to match against server-sig-algs
+ if server_algo_str:
+ server_algos = server_algo_str.split(",")
+ self._log(
+ DEBUG, "Server-side algorithm list: {}".format(server_algos)
+ )
+ # Only use algos from our list that the server likes, in our own
+ # preference order. (NOTE: purposefully using same style as in
+ # Transport...expect to refactor later)
+ agreement = list(filter(server_algos.__contains__, my_algos))
+ if agreement:
+ pubkey_algo = agreement[0]
+ self._log(
+ DEBUG,
+ "Agreed upon {!r} pubkey algorithm".format(pubkey_algo),
+ )
+ else:
+ self._log(DEBUG, "No common pubkey algorithms exist! Dying.")
+ # TODO: MAY want to use IncompatiblePeer again here but that's
+ # technically for initial key exchange, not pubkey auth.
+ err = "Unable to agree on a pubkey algorithm for signing a {!r} key!" # noqa
+ raise AuthenticationException(err.format(key_type))
+ # Fallback to something based purely on the key & our configuration
+ else:
+ pubkey_algo = self._choose_fallback_pubkey_algorithm(
+ key_type, my_algos
+ )
+ if key_type.endswith("-cert-v01@openssh.com"):
+ pubkey_algo += "-cert-v01@openssh.com"
+ self.transport._agreed_pubkey_algorithm = pubkey_algo
+ return pubkey_algo
+
    def _parse_service_accept(self, m):
        """
        Client side: the server accepted our "ssh-userauth" service request,
        so build and send the actual MSG_USERAUTH_REQUEST for whichever
        ``auth_method`` was primed by one of the ``auth_*`` entry points.
        """
        service = m.get_text()
        if service == "ssh-userauth":
            self._log(DEBUG, "userauth is OK")
            m = Message()
            m.add_byte(cMSG_USERAUTH_REQUEST)
            m.add_string(self.username)
            m.add_string("ssh-connection")
            m.add_string(self.auth_method)
            if self.auth_method == "password":
                m.add_boolean(False)
                password = b(self.password)
                m.add_string(password)
            elif self.auth_method == "publickey":
                m.add_boolean(True)
                key_type, bits = self._get_key_type_and_bits(self.private_key)
                algorithm = self._finalize_pubkey_algorithm(key_type)
                m.add_string(algorithm)
                m.add_string(bits)
                # The signature covers the session id plus the request
                # fields themselves.
                blob = self._get_session_blob(
                    self.private_key,
                    "ssh-connection",
                    self.username,
                    algorithm,
                )
                sig = self.private_key.sign_ssh_data(blob, algorithm)
                m.add_string(sig)
            elif self.auth_method == "keyboard-interactive":
                m.add_string("")
                m.add_string(self.submethods)
            elif self.auth_method == "gssapi-with-mic":
                sshgss = GSSAuth(self.auth_method, self.gss_deleg_creds)
                m.add_bytes(sshgss.ssh_gss_oids())
                # send the supported GSSAPI OIDs to the server
                self.transport._send_message(m)
                ptype, m = self.transport.packetizer.read_message()
                if ptype == MSG_USERAUTH_BANNER:
                    self._parse_userauth_banner(m)
                    ptype, m = self.transport.packetizer.read_message()
                if ptype == MSG_USERAUTH_GSSAPI_RESPONSE:
                    # Read the mechanism selected by the server. We send just
                    # the Kerberos V5 OID, so the server can only respond with
                    # this OID.
                    mech = m.get_string()
                    m = Message()
                    m.add_byte(cMSG_USERAUTH_GSSAPI_TOKEN)
                    try:
                        m.add_string(
                            sshgss.ssh_init_sec_context(
                                self.gss_host, mech, self.username
                            )
                        )
                    except GSS_EXCEPTIONS as e:
                        return self._handle_local_gss_failure(e)
                    self.transport._send_message(m)
                    while True:
                        ptype, m = self.transport.packetizer.read_message()
                        if ptype == MSG_USERAUTH_GSSAPI_TOKEN:
                            srv_token = m.get_string()
                            try:
                                next_token = sshgss.ssh_init_sec_context(
                                    self.gss_host,
                                    mech,
                                    self.username,
                                    srv_token,
                                )
                            except GSS_EXCEPTIONS as e:
                                return self._handle_local_gss_failure(e)
                            # After this step the GSSAPI should not return any
                            # token. If it does, we keep sending the token to
                            # the server until no more token is returned.
                            if next_token is None:
                                break
                            else:
                                m = Message()
                                m.add_byte(cMSG_USERAUTH_GSSAPI_TOKEN)
                                m.add_string(next_token)
                                # NOTE(review): every other send in this
                                # method uses _send_message(); the missing
                                # underscore here looks like a typo and this
                                # line only runs for multi-token GSSAPI
                                # exchanges -- confirm Transport actually
                                # exposes a public send_message().
                                self.transport.send_message(m)
                        else:
                            raise SSHException(
                                "Received Package: {}".format(MSG_NAMES[ptype])
                            )
                    m = Message()
                    m.add_byte(cMSG_USERAUTH_GSSAPI_MIC)
                    # send the MIC to the server
                    m.add_string(sshgss.ssh_get_mic(self.transport.session_id))
                elif ptype == MSG_USERAUTH_GSSAPI_ERRTOK:
                    # RFC 4462 says we are not required to implement GSS-API
                    # error messages.
                    # See RFC 4462 Section 3.8 in
                    # http://www.ietf.org/rfc/rfc4462.txt
                    raise SSHException("Server returned an error token")
                elif ptype == MSG_USERAUTH_GSSAPI_ERROR:
                    maj_status = m.get_int()
                    min_status = m.get_int()
                    err_msg = m.get_string()
                    m.get_string()  # Lang tag - discarded
                    raise SSHException(
                        """GSS-API Error:
Major Status: {}
Minor Status: {}
Error Message: {}
""".format(
                            maj_status, min_status, err_msg
                        )
                    )
                elif ptype == MSG_USERAUTH_FAILURE:
                    self._parse_userauth_failure(m)
                    return
                else:
                    raise SSHException(
                        "Received Package: {}".format(MSG_NAMES[ptype])
                    )
            elif (
                self.auth_method == "gssapi-keyex"
                and self.transport.gss_kex_used
            ):
                # MIC over the session id using the context from key exchange.
                kexgss = self.transport.kexgss_ctxt
                kexgss.set_username(self.username)
                mic_token = kexgss.ssh_get_mic(self.transport.session_id)
                m.add_string(mic_token)
            elif self.auth_method == "none":
                pass
            else:
                raise SSHException(
                    'Unknown auth method "{}"'.format(self.auth_method)
                )
            self.transport._send_message(m)
        else:
            self._log(
                DEBUG, 'Service request "{}" accepted (?)'.format(service)
            )
+
+ def _send_auth_result(self, username, method, result):
+ # okay, send result
+ m = Message()
+ if result == AUTH_SUCCESSFUL:
+ self._log(INFO, "Auth granted ({}).".format(method))
+ m.add_byte(cMSG_USERAUTH_SUCCESS)
+ self.authenticated = True
+ else:
+ self._log(INFO, "Auth rejected ({}).".format(method))
+ m.add_byte(cMSG_USERAUTH_FAILURE)
+ m.add_string(
+ self.transport.server_object.get_allowed_auths(username)
+ )
+ if result == AUTH_PARTIALLY_SUCCESSFUL:
+ m.add_boolean(True)
+ else:
+ m.add_boolean(False)
+ self.auth_fail_count += 1
+ self.transport._send_message(m)
+ if self.auth_fail_count >= 10:
+ self._disconnect_no_more_auth()
+ if result == AUTH_SUCCESSFUL:
+ self.transport._auth_trigger()
+
+ def _interactive_query(self, q):
+ # make interactive query instead of response
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_INFO_REQUEST)
+ m.add_string(q.name)
+ m.add_string(q.instructions)
+ m.add_string(bytes())
+ m.add_int(len(q.prompts))
+ for p in q.prompts:
+ m.add_string(p[0])
+ m.add_boolean(p[1])
+ self.transport._send_message(m)
+
+ def _parse_userauth_request(self, m):
+ if not self.transport.server_mode:
+ # er, uh... what?
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_FAILURE)
+ m.add_string("none")
+ m.add_boolean(False)
+ self.transport._send_message(m)
+ return
+ if self.authenticated:
+ # ignore
+ return
+ username = m.get_text()
+ service = m.get_text()
+ method = m.get_text()
+ self._log(
+ DEBUG,
+ "Auth request (type={}) service={}, username={}".format(
+ method, service, username
+ ),
+ )
+ if service != "ssh-connection":
+ self._disconnect_service_not_available()
+ return
+ if (self.auth_username is not None) and (
+ self.auth_username != username
+ ):
+ self._log(
+ WARNING,
+ "Auth rejected because the client attempted to change username in mid-flight", # noqa
+ )
+ self._disconnect_no_more_auth()
+ return
+ self.auth_username = username
+ # check if GSS-API authentication is enabled
+ gss_auth = self.transport.server_object.enable_auth_gssapi()
+
+ if method == "none":
+ result = self.transport.server_object.check_auth_none(username)
+ elif method == "password":
+ changereq = m.get_boolean()
+ password = m.get_binary()
+ try:
+ password = password.decode("UTF-8")
+ except UnicodeError:
+ # some clients/servers expect non-utf-8 passwords!
+ # in this case, just return the raw byte string.
+ pass
+ if changereq:
+ # always treated as failure, since we don't support changing
+ # passwords, but collect the list of valid auth types from
+ # the callback anyway
+ self._log(DEBUG, "Auth request to change passwords (rejected)")
+ newpassword = m.get_binary()
+ try:
+ newpassword = newpassword.decode("UTF-8", "replace")
+ except UnicodeError:
+ pass
+ result = AUTH_FAILED
+ else:
+ result = self.transport.server_object.check_auth_password(
+ username, password
+ )
+ elif method == "publickey":
+ sig_attached = m.get_boolean()
+ # NOTE: server never wants to guess a client's algo, they're
+ # telling us directly. No need for _finalize_pubkey_algorithm
+ # anywhere in this flow.
+ algorithm = m.get_text()
+ keyblob = m.get_binary()
+ try:
+ key = self._generate_key_from_request(algorithm, keyblob)
+ except SSHException as e:
+ self._log(INFO, "Auth rejected: public key: {}".format(str(e)))
+ key = None
+ except Exception as e:
+ msg = "Auth rejected: unsupported or mangled public key ({}: {})" # noqa
+ self._log(INFO, msg.format(e.__class__.__name__, e))
+ key = None
+ if key is None:
+ self._disconnect_no_more_auth()
+ return
+ # first check if this key is okay... if not, we can skip the verify
+ result = self.transport.server_object.check_auth_publickey(
+ username, key
+ )
+ if result != AUTH_FAILED:
+ # key is okay, verify it
+ if not sig_attached:
+ # client wants to know if this key is acceptable, before it
+ # signs anything... send special "ok" message
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_PK_OK)
+ m.add_string(algorithm)
+ m.add_string(keyblob)
+ self.transport._send_message(m)
+ return
+ sig = Message(m.get_binary())
+ blob = self._get_session_blob(
+ key, service, username, algorithm
+ )
+ if not key.verify_ssh_sig(blob, sig):
+ self._log(INFO, "Auth rejected: invalid signature")
+ result = AUTH_FAILED
+ elif method == "keyboard-interactive":
+ submethods = m.get_string()
+ result = self.transport.server_object.check_auth_interactive(
+ username, submethods
+ )
+ if isinstance(result, InteractiveQuery):
+ # make interactive query instead of response
+ self._interactive_query(result)
+ return
+ elif method == "gssapi-with-mic" and gss_auth:
+ sshgss = GSSAuth(method)
+ # Read the number of OID mechanisms supported by the client.
+ # OpenSSH sends just one OID. It's the Kerberos V5 OID and that's
+ # the only OID we support.
+ mechs = m.get_int()
+ # We can't accept more than one OID, so if the SSH client sends
+ # more than one, disconnect.
+ if mechs > 1:
+ self._log(
+ INFO,
+ "Disconnect: Received more than one GSS-API OID mechanism",
+ )
+ self._disconnect_no_more_auth()
+ desired_mech = m.get_string()
+ mech_ok = sshgss.ssh_check_mech(desired_mech)
+ # if we don't support the mechanism, disconnect.
+ if not mech_ok:
+ self._log(
+ INFO,
+ "Disconnect: Received an invalid GSS-API OID mechanism",
+ )
+ self._disconnect_no_more_auth()
+ # send the Kerberos V5 GSSAPI OID to the client
+ supported_mech = sshgss.ssh_gss_oids("server")
+ # RFC 4462 says we are not required to implement GSS-API error
+ # messages. See section 3.8 in http://www.ietf.org/rfc/rfc4462.txt
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_GSSAPI_RESPONSE)
+ m.add_bytes(supported_mech)
+ self.transport.auth_handler = GssapiWithMicAuthHandler(
+ self, sshgss
+ )
+ self.transport._expected_packet = (
+ MSG_USERAUTH_GSSAPI_TOKEN,
+ MSG_USERAUTH_REQUEST,
+ MSG_SERVICE_REQUEST,
+ )
+ self.transport._send_message(m)
+ return
+ elif method == "gssapi-keyex" and gss_auth:
+ mic_token = m.get_string()
+ sshgss = self.transport.kexgss_ctxt
+ if sshgss is None:
+ # If there is no valid context, we reject the authentication
+ # NOTE(review): there is no early return after sending the failure
+ # below, so execution falls through to ssh_check_mic() on a None
+ # context and raises AttributeError -- confirm whether a `return`
+ # is intended here.
+ result = AUTH_FAILED
+ self._send_auth_result(username, method, result)
+ try:
+ sshgss.ssh_check_mic(
+ mic_token, self.transport.session_id, self.auth_username
+ )
+ except Exception:
+ result = AUTH_FAILED
+ self._send_auth_result(username, method, result)
+ raise
+ result = AUTH_SUCCESSFUL
+ self.transport.server_object.check_auth_gssapi_keyex(
+ username, result
+ )
+ else:
+ result = self.transport.server_object.check_auth_none(username)
+ # okay, send result
+ self._send_auth_result(username, method, result)
+
+ def _parse_userauth_success(self, m):
+ self._log(
+ INFO, "Authentication ({}) successful!".format(self.auth_method)
+ )
+ self.authenticated = True
+ self.transport._auth_trigger()
+ if self.auth_event is not None:
+ self.auth_event.set()
+
    def _parse_userauth_failure(self, m):
        """
        Handle MSG_USERAUTH_FAILURE: record why, stash a suitable exception
        on the transport, and wake any waiter via auth_event.
        """
        authlist = m.get_list()
        # TODO 4.0: we aren't giving callers access to authlist _unless_ it's
        # partial authentication, so eg authtype=none can't work unless we
        # tweak this.
        partial = m.get_boolean()
        if partial:
            # Server accepted this method but wants more; surface the
            # remaining allowed types via PartialAuthentication.
            self._log(INFO, "Authentication continues...")
            self._log(DEBUG, "Methods: " + str(authlist))
            self.transport.saved_exception = PartialAuthentication(authlist)
        elif self.auth_method not in authlist:
            for msg in (
                "Authentication type ({}) not permitted.".format(
                    self.auth_method
                ),
                "Allowed methods: {}".format(authlist),
            ):
                self._log(DEBUG, msg)
            self.transport.saved_exception = BadAuthenticationType(
                "Bad authentication type", authlist
            )
        else:
            self._log(
                INFO, "Authentication ({}) failed.".format(self.auth_method)
            )
        self.authenticated = False
        self.username = None
        if self.auth_event is not None:
            self.auth_event.set()
+
+ def _parse_userauth_banner(self, m):
+ banner = m.get_string()
+ self.banner = banner
+ self._log(INFO, "Auth banner: {}".format(banner))
+ # who cares.
+
+ def _parse_userauth_info_request(self, m):
+ if self.auth_method != "keyboard-interactive":
+ raise SSHException("Illegal info request from server")
+ title = m.get_text()
+ instructions = m.get_text()
+ m.get_binary() # lang
+ prompts = m.get_int()
+ prompt_list = []
+ for i in range(prompts):
+ prompt_list.append((m.get_text(), m.get_boolean()))
+ response_list = self.interactive_handler(
+ title, instructions, prompt_list
+ )
+
+ m = Message()
+ m.add_byte(cMSG_USERAUTH_INFO_RESPONSE)
+ m.add_int(len(response_list))
+ for r in response_list:
+ m.add_string(r)
+ self.transport._send_message(m)
+
+ def _parse_userauth_info_response(self, m):
+ if not self.transport.server_mode:
+ raise SSHException("Illegal info response from server")
+ n = m.get_int()
+ responses = []
+ for i in range(n):
+ responses.append(m.get_text())
+ result = self.transport.server_object.check_auth_interactive_response(
+ responses
+ )
+ if isinstance(result, InteractiveQuery):
+ # make interactive query instead of response
+ self._interactive_query(result)
+ return
+ self._send_auth_result(
+ self.auth_username, "keyboard-interactive", result
+ )
+
+ def _handle_local_gss_failure(self, e):
+ self.transport.saved_exception = e
+ self._log(DEBUG, "GSSAPI failure: {}".format(e))
+ self._log(INFO, "Authentication ({}) failed.".format(self.auth_method))
+ self.authenticated = False
+ self.username = None
+ if self.auth_event is not None:
+ self.auth_event.set()
+ return
+
+ # TODO 4.0: MAY make sense to make these tables into actual
+ # classes/instances that can be fed a mode bool or whatever. Or,
+ # alternately (both?) make the message types small classes or enums that
+ # embed this info within themselves (which could also then tidy up the
+ # current 'integer -> human readable short string' stuff in common.py).
+ # TODO: if we do that, also expose 'em publicly.
+
+ # Messages which should be handled _by_ servers (sent by clients)
+    @property
+    def _server_handler_table(self):
+        # Dispatch table for packets a *server* receives (sent by clients).
+        return {
+            # TODO 4.0: MSG_SERVICE_REQUEST ought to eventually move into
+            # Transport's server mode like the client side did, just for
+            # consistency.
+            MSG_SERVICE_REQUEST: self._parse_service_request,
+            MSG_USERAUTH_REQUEST: self._parse_userauth_request,
+            MSG_USERAUTH_INFO_RESPONSE: self._parse_userauth_info_response,
+        }
+
+ # Messages which should be handled _by_ clients (sent by servers)
+    @property
+    def _client_handler_table(self):
+        # Dispatch table for packets a *client* receives (sent by servers).
+        return {
+            MSG_SERVICE_ACCEPT: self._parse_service_accept,
+            MSG_USERAUTH_SUCCESS: self._parse_userauth_success,
+            MSG_USERAUTH_FAILURE: self._parse_userauth_failure,
+            MSG_USERAUTH_BANNER: self._parse_userauth_banner,
+            MSG_USERAUTH_INFO_REQUEST: self._parse_userauth_info_request,
+        }
+
+ # NOTE: prior to the fix for #1283, this was a static dict instead of a
+ # property. Should be backwards compatible in most/all cases.
+ @property
+ def _handler_table(self):
+ if self.transport.server_mode:
+ return self._server_handler_table
+ else:
+ return self._client_handler_table
+
+
+class GssapiWithMicAuthHandler:
+    """A specialized Auth handler for gssapi-with-mic
+
+    During the GSSAPI token exchange we need a modified dispatch table,
+    because the packet type numbers are not unique.
+    """
+
+    method = "gssapi-with-mic"
+
+    def __init__(self, delegate, sshgss):
+        # delegate: the regular AuthHandler to re-install once the GSS
+        # token exchange is finished (or aborted).
+        self._delegate = delegate
+        # sshgss: the GSS-API security-context wrapper used for the exchange.
+        self.sshgss = sshgss
+
+    def abort(self):
+        # Restore the normal handler first so subsequent packets dispatch
+        # through the standard table.
+        self._restore_delegate_auth_handler()
+        return self._delegate.abort()
+
+    # The following properties simply proxy to the wrapped handler so this
+    # object can stand in for it during the token exchange.
+    @property
+    def transport(self):
+        return self._delegate.transport
+
+    @property
+    def _send_auth_result(self):
+        return self._delegate._send_auth_result
+
+    @property
+    def auth_username(self):
+        return self._delegate.auth_username
+
+    @property
+    def gss_host(self):
+        return self._delegate.gss_host
+
+    def _restore_delegate_auth_handler(self):
+        # Swap the wrapped handler back in as the transport's active one.
+        self.transport.auth_handler = self._delegate
+
+    def _parse_userauth_gssapi_token(self, m):
+        # Server-side step of the token exchange: feed the client's token
+        # into our security context and, if it yields a reply token, send
+        # it back and keep the exchange going.
+        client_token = m.get_string()
+        # use the client token as input to establish a secure
+        # context.
+        sshgss = self.sshgss
+        try:
+            token = sshgss.ssh_accept_sec_context(
+                self.gss_host, client_token, self.auth_username
+            )
+        except Exception as e:
+            # On failure: stash the error, report AUTH_FAILED via the
+            # restored delegate, then re-raise for the transport loop.
+            self.transport.saved_exception = e
+            result = AUTH_FAILED
+            self._restore_delegate_auth_handler()
+            self._send_auth_result(self.auth_username, self.method, result)
+            raise
+        if token is not None:
+            m = Message()
+            m.add_byte(cMSG_USERAUTH_GSSAPI_TOKEN)
+            m.add_string(token)
+            self.transport._expected_packet = (
+                MSG_USERAUTH_GSSAPI_TOKEN,
+                MSG_USERAUTH_GSSAPI_MIC,
+                MSG_USERAUTH_REQUEST,
+            )
+            self.transport._send_message(m)
+
+    def _parse_userauth_gssapi_mic(self, m):
+        # Final server-side step: verify the client's MIC over the session
+        # id. The delegate is restored *before* verification so the result
+        # (or any raised error) flows through the normal handler.
+        mic_token = m.get_string()
+        sshgss = self.sshgss
+        username = self.auth_username
+        self._restore_delegate_auth_handler()
+        try:
+            sshgss.ssh_check_mic(
+                mic_token, self.transport.session_id, username
+            )
+        except Exception as e:
+            self.transport.saved_exception = e
+            result = AUTH_FAILED
+            self._send_auth_result(username, self.method, result)
+            raise
+        # TODO: Implement client credential saving.
+        # The OpenSSH server is able to create a TGT with the delegated
+        # client credentials, but this is not supported by GSS-API.
+        result = AUTH_SUCCESSFUL
+        self.transport.server_object.check_auth_gssapi_with_mic(
+            username, result
+        )
+        # okay, send result
+        self._send_auth_result(username, self.method, result)
+
+    def _parse_service_request(self, m):
+        # Non-GSS packet arrived mid-exchange: hand control back to the
+        # delegate and let it process the packet.
+        self._restore_delegate_auth_handler()
+        return self._delegate._parse_service_request(m)
+
+    def _parse_userauth_request(self, m):
+        self._restore_delegate_auth_handler()
+        return self._delegate._parse_userauth_request(m)
+
+    # NOTE: values are the raw class-body functions (not bound methods);
+    # presumably the dispatcher calls them with the handler instance as an
+    # explicit first argument - confirm against Transport's run loop.
+    __handler_table = {
+        MSG_SERVICE_REQUEST: _parse_service_request,
+        MSG_USERAUTH_REQUEST: _parse_userauth_request,
+        MSG_USERAUTH_GSSAPI_TOKEN: _parse_userauth_gssapi_token,
+        MSG_USERAUTH_GSSAPI_MIC: _parse_userauth_gssapi_mic,
+    }
+
+    @property
+    def _handler_table(self):
+        # TODO: determine if we can cut this up like we did for the primary
+        # AuthHandler class.
+        return self.__handler_table
+
+
class AuthOnlyHandler(AuthHandler):
    """
    AuthHandler, and just auth, no service requests!

    .. versionadded:: 3.2
    """

    # NOTE: this purposefully duplicates some of the parent class in order to
    # modernize, refactor, etc. The intent is that eventually we will collapse
    # this one onto the parent in a backwards incompatible release.

    @property
    def _client_handler_table(self):
        # Same dispatch table as the parent, minus MSG_SERVICE_ACCEPT: this
        # handler never sends service requests, so it should never see one
        # accepted.
        my_table = super()._client_handler_table.copy()
        del my_table[MSG_SERVICE_ACCEPT]
        return my_table

    def send_auth_request(self, username, method, finish_message=None):
        """
        Submit a userauth request message & wait for response.

        Performs the transport message send call, sets self.auth_event, and
        will lock-n-block as necessary to both send, and wait for response to,
        the USERAUTH_REQUEST.

        Most callers will want to supply a callback to ``finish_message``,
        which accepts a Message ``m`` and may call mutator methods on it to add
        more fields.
        """
        # Store a few things for reference in handlers, including auth failure
        # handler (which needs to know if we were using a bad method, etc)
        self.auth_method = method
        self.username = username
        # Generic userauth request fields
        m = Message()
        m.add_byte(cMSG_USERAUTH_REQUEST)
        m.add_string(username)
        m.add_string("ssh-connection")
        m.add_string(method)
        # Caller usually has more to say, such as injecting password, key etc.
        # Guard against the default: auth_none() legitimately supplies no
        # callback, and calling None would raise TypeError.
        if finish_message is not None:
            finish_message(m)
        # TODO 4.0: seems odd to have the client handle the lock and not
        # Transport; that _may_ have been an artifact of allowing user
        # threading event injection? Regardless, we don't want to move _this_
        # locking into Transport._send_message now, because lots of other
        # untouched code also uses that method and we might end up
        # double-locking (?) but 4.0 would be a good time to revisit.
        with self.transport.lock:
            self.transport._send_message(m)
        # We have cut out the higher level event args, but self.auth_event is
        # still required for self.wait_for_response to function correctly (it's
        # the mechanism used by the auth success/failure handlers, the abort
        # handler, and a few other spots like in gssapi.
        # TODO: interestingly, wait_for_response itself doesn't actually
        # enforce that its event argument and self.auth_event are the same...
        self.auth_event = threading.Event()
        return self.wait_for_response(self.auth_event)

    def auth_none(self, username):
        """Attempt "none" auth; no extra request fields are needed."""
        return self.send_auth_request(username, "none")

    def auth_publickey(self, username, key):
        """Authenticate with a private key by signing the session blob."""
        key_type, bits = self._get_key_type_and_bits(key)
        algorithm = self._finalize_pubkey_algorithm(key_type)
        blob = self._get_session_blob(
            key,
            "ssh-connection",
            username,
            algorithm,
        )

        def finish(m):
            # This field doesn't appear to be named, but is False when querying
            # for permission (ie knowing whether to even prompt a user for
            # passphrase, etc) or True when just going for it. Paramiko has
            # never bothered with the former type of message, apparently.
            m.add_boolean(True)
            m.add_string(algorithm)
            m.add_string(bits)
            m.add_string(key.sign_ssh_data(blob, algorithm))

        return self.send_auth_request(username, "publickey", finish)

    def auth_password(self, username, password):
        """Authenticate with a plain password."""

        def finish(m):
            # Unnamed field that equates to "I am changing my password", which
            # Paramiko clientside never supported and serverside only sort of
            # supported.
            m.add_boolean(False)
            m.add_string(b(password))

        return self.send_auth_request(username, "password", finish)

    def auth_interactive(self, username, handler, submethods=""):
        """
        response_list = handler(title, instructions, prompt_list)
        """
        # Unlike most siblings, this auth method _does_ require other
        # superclass handlers (eg userauth info request) to understand
        # what's going on, so we still set some self attributes.
        # BUG FIX: must be the hyphenated wire spelling; the inherited
        # _parse_userauth_info_request raises "Illegal info request from
        # server" for any value other than "keyboard-interactive", so the
        # previous "keyboard_interactive" broke the info-request round trip.
        self.auth_method = "keyboard-interactive"
        self.interactive_handler = handler

        def finish(m):
            # Empty string for deprecated language tag field, per RFC 4256:
            # https://www.rfc-editor.org/rfc/rfc4256#section-3.1
            m.add_string("")
            m.add_string(submethods)

        return self.send_auth_request(username, "keyboard-interactive", finish)

    # NOTE: not strictly 'auth only' related, but allows users to opt-in.
    def _choose_fallback_pubkey_algorithm(self, key_type, my_algos):
        """Pick a pubkey algorithm when the server sent no server-sig-algs."""
        msg = "Server did not send a server-sig-algs list; defaulting to something in our preferred algorithms list"  # noqa
        self._log(DEBUG, msg)
        noncert_key_type = key_type.replace("-cert-v01@openssh.com", "")
        if key_type in my_algos or noncert_key_type in my_algos:
            actual = key_type if key_type in my_algos else noncert_key_type
            msg = f"Current key type, {actual!r}, is in our preferred list; using that"  # noqa
            algo = actual
        else:
            algo = my_algos[0]
            msg = f"{key_type!r} not in our list - trying first list item instead, {algo!r}"  # noqa
        self._log(DEBUG, msg)
        return algo
diff --git a/lib/paramiko/auth_strategy.py b/lib/paramiko/auth_strategy.py
new file mode 100644
index 0000000..03c1d87
--- /dev/null
+++ b/lib/paramiko/auth_strategy.py
@@ -0,0 +1,306 @@
+"""
+Modern, adaptable authentication machinery.
+
+Replaces certain parts of `.SSHClient`. For a concrete implementation, see the
+``OpenSSHAuthStrategy`` class in `Fabric `_.
+"""
+
+from collections import namedtuple
+
+from .agent import AgentKey
+from .util import get_logger
+from .ssh_exception import AuthenticationException
+
+
class AuthSource:
    """
    Some SSH authentication source, such as a password, private key, or agent.

    See subclasses in this module for concrete implementations.

    All implementations must accept at least a ``username`` (``str``) kwarg.
    """

    def __init__(self, username):
        self.username = username

    def _repr(self, **kwargs):
        # TODO: are there any good libs for this? maybe some helper from
        # structlog?
        inner = ", ".join(f"{key}={value!r}" for key, value in kwargs.items())
        return f"{self.__class__.__name__}({inner})"

    def __repr__(self):
        return self._repr()

    def authenticate(self, transport):
        """
        Perform authentication.
        """
        raise NotImplementedError
+
+
class NoneAuth(AuthSource):
    """
    Auth type "none", ie https://www.rfc-editor.org/rfc/rfc4252#section-5.2 .
    """

    def authenticate(self, transport):
        # "none" auth carries no credentials beyond the username itself.
        return transport.auth_none(self.username)
+
+
class Password(AuthSource):
    """
    Password authentication.

    :param callable password_getter:
        A lazy callable that should return a `str` password value at
        authentication time, such as a `functools.partial` wrapping
        `getpass.getpass`, an API call to a secrets store, or similar.

        If you already know the password at instantiation time, you should
        simply use something like ``lambda: "my literal"`` (for a literal, but
        also, shame on you!) or ``lambda: variable_name`` (for something stored
        in a variable).
    """

    def __init__(self, username, password_getter):
        super().__init__(username=username)
        self.password_getter = password_getter

    def __repr__(self):
        # Unlike key-based sources, the username is genuinely interesting for
        # password auth, so surface it in the repr.
        return super()._repr(user=self.username)

    def authenticate(self, transport):
        # Defer fetching the password until the last possible moment: the
        # getter may prompt a human or hit a secrets service.
        # TODO: be nice to log source _of_ the password?
        secret = self.password_getter()
        return transport.auth_password(self.username, secret)
+
+
+# TODO 4.0: twiddle this, or PKey, or both, so they're more obviously distinct.
+# TODO 4.0: the obvious is to make this more wordy (PrivateKeyAuth), the
+# minimalist approach might be to rename PKey to just Key (esp given all the
+# subclasses are WhateverKey and not WhateverPKey)
+class PrivateKey(AuthSource):
+    """
+    Essentially a mixin for private keys.
+
+    Knows how to auth, but leaves key material discovery/loading/decryption to
+    subclasses.
+
+    Subclasses **must** ensure that they've set ``self.pkey`` to a decrypted
+    `.PKey` instance before calling ``super().authenticate``; typically
+    either in their ``__init__``, or in an overridden ``authenticate`` prior to
+    its `super` call.
+    """
+
+    def authenticate(self, transport):
+        # Relies on the subclass contract above: self.pkey must already be a
+        # decrypted key object by the time this runs.
+        return transport.auth_publickey(self.username, self.pkey)
+
+
class InMemoryPrivateKey(PrivateKey):
    """
    An in-memory, decrypted `.PKey` object.
    """

    def __init__(self, username, pkey):
        super().__init__(username=username)
        # Assumed to be decrypted already; no passphrase handling here.
        self.pkey = pkey

    def __repr__(self):
        # NOTE: most of interesting repr-bits for private keys is in PKey.
        # Keys that came from an SSH agent get a trailing marker, purely
        # for display purposes.
        rep = super()._repr(pkey=self.pkey)
        return f"{rep} [agent]" if isinstance(self.pkey, AgentKey) else rep
+
+
class OnDiskPrivateKey(PrivateKey):
    """
    Some on-disk private key that needs opening and possibly decrypting.

    :param str source:
        String tracking where this key's path was specified; should be one of
        ``"ssh-config"``, ``"python-config"``, or ``"implicit-home"``.
    :param Path path:
        The filesystem path this key was loaded from.
    :param PKey pkey:
        The `PKey` object this auth source uses/represents.

    :raises ValueError: if ``source`` is not one of the allowed values.
    """

    # Allowed provenance markers, hoisted to class level so they're
    # introspectable without instantiating.
    ALLOWED_SOURCES = ("ssh-config", "python-config", "implicit-home")

    def __init__(self, username, source, path, pkey):
        super().__init__(username=username)
        # Validate *before* mutating state, so a bad call doesn't leave a
        # half-initialized instance behind (previously self.source was
        # assigned prior to the check).
        if source not in self.ALLOWED_SOURCES:
            raise ValueError(
                f"source argument must be one of: {self.ALLOWED_SOURCES!r}"
            )
        self.source = source
        self.path = path
        # Superclass wants .pkey, other two are mostly for display/debugging.
        self.pkey = pkey

    def __repr__(self):
        return self._repr(
            key=self.pkey, source=self.source, path=str(self.path)
        )
+
+
+# TODO re sources: is there anything in an OpenSSH config file that doesn't fit
+# into what Paramiko already had kwargs for?
+
+
+SourceResult = namedtuple("SourceResult", ["source", "result"])
+
+# TODO: tempting to make this an OrderedDict, except the keys essentially want
+# to be rich objects (AuthSources) which do not make for useful user indexing?
+# TODO: members being vanilla tuples is pretty old-school/expedient; they
+# "really" want to be something that's type friendlier (unless the tuple's 2nd
+# member being a Union of two types is "fine"?), which I assume means yet more
+# classes, eg an abstract SourceResult with concrete AuthSuccess and
+# AuthFailure children?
+# TODO: arguably we want __init__ typechecking of the members (or to leverage
+# mypy by classifying this literally as list-of-AuthSource?)
class AuthResult(list):
    """
    Represents a partial or complete SSH authentication attempt.

    This class conceptually extends `AuthStrategy` by pairing the former's
    authentication **sources** with the **results** of trying to authenticate
    with them.

    `AuthResult` is a (subclass of) `list` of `namedtuple`, which are of the
    form ``namedtuple('SourceResult', 'source', 'result')`` (where the
    ``source`` member is an `AuthSource` and the ``result`` member is either a
    return value from the relevant `.Transport` method, or an exception
    object).

    .. note::
        Transport auth method results are always themselves a ``list`` of
        "next allowable authentication methods"; an empty list means the
        attempt succeeded, and ``__str__`` renders that case as the word
        ``success`` so a transcript reads naturally to humans.

    Instances also have a `strategy` attribute referencing the `AuthStrategy`
    which was attempted.
    """

    def __init__(self, strategy, *args, **kwargs):
        # Record which strategy produced this result, then delegate the
        # actual list construction to the superclass.
        self.strategy = strategy
        super().__init__(*args, **kwargs)

    def __str__(self):
        # NOTE: meaningfully distinct from __repr__, which still wants to use
        # superclass' implementation. One line per attempted source; empty
        # (falsey) results are shown as "success".
        rendered = []
        for attempt in self:
            outcome = attempt.result or "success"
            rendered.append(f"{attempt.source} -> {outcome}")
        return "\n".join(rendered)
+
+
+# TODO 4.0: descend from SSHException or even just Exception
+class AuthFailure(AuthenticationException):
+    """
+    Basic exception wrapping an `AuthResult` indicating overall auth failure.
+
+    Note that `AuthFailure` descends from `AuthenticationException` but is
+    generally "higher level"; the latter is now only raised by individual
+    `AuthSource` attempts and should typically only be seen by users when
+    encapsulated in this class. It subclasses `AuthenticationException`
+    primarily for backwards compatibility reasons.
+    """
+
+    def __init__(self, result):
+        # result: the AuthResult accumulated across all attempted sources.
+        self.result = result
+
+    def __str__(self):
+        # Leading newline so the multi-line AuthResult rendering starts on
+        # its own line in tracebacks/log output.
+        return "\n" + str(self.result)
+
+
+class AuthStrategy:
+    """
+    This class represents one or more attempts to auth with an SSH server.
+
+    By default, subclasses must at least accept an ``ssh_config``
+    (`.SSHConfig`) keyword argument, but may opt to accept more as needed for
+    their particular strategy.
+    """
+
+    def __init__(
+        self,
+        ssh_config,
+    ):
+        self.ssh_config = ssh_config
+        # Module-scoped logger; subclasses share it via self.log.
+        self.log = get_logger(__name__)
+
+    def get_sources(self):
+        """
+        Generator yielding `AuthSource` instances, in the order to try.
+
+        This is the primary override point for subclasses: you figure out what
+        sources you need, and ``yield`` them.
+
+        Subclasses _of_ subclasses may find themselves wanting to do things
+        like filtering or discarding around a call to `super`.
+        """
+        raise NotImplementedError
+
+    def authenticate(self, transport):
+        """
+        Handles attempting `AuthSource` instances yielded from `get_sources`.
+
+        You *normally* won't need to override this, but it's an option for
+        advanced users.
+        """
+        succeeded = False
+        overall_result = AuthResult(strategy=self)
+        # TODO: arguably we could fit in a "send none auth, record allowed auth
+        # types sent back" thing here as OpenSSH-client does, but that likely
+        # wants to live in fabric.OpenSSHAuthStrategy as not all target servers
+        # will implement it!
+        # TODO: needs better "server told us too many attempts" checking!
+        for source in self.get_sources():
+            self.log.debug(f"Trying {source}")
+            try: # NOTE: this really wants to _only_ wrap the authenticate()!
+                result = source.authenticate(transport)
+                succeeded = True
+            # TODO: 'except PartialAuthentication' is needed for 2FA and
+            # similar, as per old SSHClient.connect - it is the only way
+            # AuthHandler supplies access to the 'name-list' field from
+            # MSG_USERAUTH_FAILURE, at present.
+            except Exception as e:
+                result = e
+                # TODO: look at what this could possibly raise, we don't really
+                # want Exception here, right? just SSHException subclasses? or
+                # do we truly want to capture anything at all with assumption
+                # it's easy enough for users to look afterwards?
+                # NOTE: showing type, not message, for terseness & also most of
+                # the time it's basically just "Authentication failed."
+                source_class = e.__class__.__name__
+                self.log.info(
+                    f"Authentication via {source} failed with {source_class}"
+                )
+            # Every attempt - success or failure - is recorded, in order.
+            overall_result.append(SourceResult(source, result))
+            if succeeded:
+                break
+        # Gotta die here if nothing worked, otherwise Transport's main loop
+        # just kinda hangs out until something times out!
+        if not succeeded:
+            raise AuthFailure(result=overall_result)
+        # Success: give back what was done, in case they care.
+        return overall_result
+
+    # TODO: is there anything OpenSSH client does which _can't_ cleanly map to
+    # iterating a generator?
diff --git a/lib/paramiko/ber.py b/lib/paramiko/ber.py
new file mode 100644
index 0000000..b8287f5
--- /dev/null
+++ b/lib/paramiko/ber.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from paramiko.common import max_byte, zero_byte, byte_ord, byte_chr
+
+import paramiko.util as util
+from paramiko.util import b
+from paramiko.sftp import int64
+
+
class BERException(Exception):
    """Raised when BER data cannot be decoded or encoded."""
+
+
+class BER:
+    """
+    Robey's tiny little attempt at a BER decoder.
+    """
+
+    def __init__(self, content=bytes()):
+        # Content is normalized to bytes; b() coerces str input.
+        self.content = b(content)
+        # Read cursor advanced by decode_next().
+        self.idx = 0
+
+    def asbytes(self):
+        return self.content
+
+    def __str__(self):
+        # NOTE(review): returns bytes (asbytes) rather than str; under
+        # Python 3, str(BER(...)) will therefore raise TypeError. Looks like
+        # a Python 2 leftover - confirm before relying on it.
+        return self.asbytes()
+
+    def __repr__(self):
+        return "BER('" + repr(self.content) + "')"
+
+    def decode(self):
+        return self.decode_next()
+
+    def decode_next(self):
+        # Decode and return the next element, or None if the buffer is
+        # exhausted or an element is truncated.
+        if self.idx >= len(self.content):
+            return None
+        # Identifier octet: low-tag numbers fit in 5 bits; the value 31
+        # flags the multi-octet (base-128, high bit = continuation) form.
+        ident = byte_ord(self.content[self.idx])
+        self.idx += 1
+        if (ident & 31) == 31:
+            # identifier > 30
+            ident = 0
+            while self.idx < len(self.content):
+                t = byte_ord(self.content[self.idx])
+                self.idx += 1
+                ident = (ident << 7) | (t & 0x7F)
+                if not (t & 0x80):
+                    break
+        if self.idx >= len(self.content):
+            return None
+        # now fetch length
+        size = byte_ord(self.content[self.idx])
+        self.idx += 1
+        if size & 0x80:
+            # more complicated: long-form length - the next (size & 0x7F)
+            # octets hold the actual byte count.
+            # FIXME: theoretically should handle indefinite-length (0x80)
+            t = size & 0x7F
+            if self.idx + t > len(self.content):
+                return None
+            size = util.inflate_long(
+                self.content[self.idx : self.idx + t], True
+            )
+            self.idx += t
+        if self.idx + size > len(self.content):
+            # can't fit
+            return None
+        data = self.content[self.idx : self.idx + size]
+        self.idx += size
+        # now switch on id
+        if ident == 0x30:
+            # sequence
+            return self.decode_sequence(data)
+        elif ident == 2:
+            # int
+            return util.inflate_long(data)
+        else:
+            # 1: boolean (00 false, otherwise true)
+            msg = "Unknown ber encoding type {:d} (robey is lazy)"
+            raise BERException(msg.format(ident))
+
+    @staticmethod
+    def decode_sequence(data):
+        # Decode a SEQUENCE payload into a plain Python list of elements.
+        out = []
+        ber = BER(data)
+        while True:
+            x = ber.decode_next()
+            if x is None:
+                break
+            out.append(x)
+        return out
+
+    def encode_tlv(self, ident, val):
+        # no need to support ident > 31 here
+        self.content += byte_chr(ident)
+        # Long-form length when the value exceeds 127 bytes.
+        if len(val) > 0x7F:
+            lenstr = util.deflate_long(len(val))
+            self.content += byte_chr(0x80 + len(lenstr)) + lenstr
+        else:
+            self.content += byte_chr(len(val))
+        self.content += val
+
+    def encode(self, x):
+        # Append the BER encoding of a Python bool/int/str/list/tuple.
+        if type(x) is bool:
+            if x:
+                self.encode_tlv(1, max_byte)
+            else:
+                self.encode_tlv(1, zero_byte)
+        elif (type(x) is int) or (type(x) is int64):
+            self.encode_tlv(2, util.deflate_long(x))
+        elif type(x) is str:
+            # NOTE(review): self.content is bytes, so encode_tlv's
+            # `self.content += val` will raise TypeError for a str here on
+            # Python 3 - callers presumably must pass bytes; confirm.
+            self.encode_tlv(4, x)
+        elif (type(x) is list) or (type(x) is tuple):
+            self.encode_tlv(0x30, self.encode_sequence(x))
+        else:
+            raise BERException(
+                "Unknown type for encoding: {!r}".format(type(x))
+            )
+
+    @staticmethod
+    def encode_sequence(data):
+        # Encode each item in turn and return the concatenated payload.
+        ber = BER()
+        for item in data:
+            ber.encode(item)
+        return ber.asbytes()
diff --git a/lib/paramiko/buffered_pipe.py b/lib/paramiko/buffered_pipe.py
new file mode 100644
index 0000000..c19279c
--- /dev/null
+++ b/lib/paramiko/buffered_pipe.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2006-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Attempt to generalize the "feeder" part of a `.Channel`: an object which can be
+read from and closed, but is reading from a buffer fed by another thread. The
+read operations are blocking and can have a timeout set.
+"""
+
+import array
+import threading
+import time
+from paramiko.util import b
+
+
class PipeTimeout(IOError):
    """
    Indicates that a timeout was reached on a read from a `.BufferedPipe`.
    """
+
+
+class BufferedPipe:
+    """
+    A buffer that obeys normal read (with timeout) & close semantics for a
+    file or socket, but is fed data from another thread. This is used by
+    `.Channel`.
+    """
+
+    def __init__(self):
+        # _cv shares _lock so waiters atomically release it while blocking.
+        self._lock = threading.Lock()
+        self._cv = threading.Condition(self._lock)
+        # Optional threading.Event mirroring "data ready or closed" state.
+        self._event = None
+        # Unsigned-byte array: cheap append-at-end, slice-from-front.
+        self._buffer = array.array("B")
+        self._closed = False
+
+    def _buffer_frombytes(self, data):
+        self._buffer.frombytes(data)
+
+    def _buffer_tobytes(self, limit=None):
+        return self._buffer[:limit].tobytes()
+
+    def set_event(self, event):
+        """
+        Set an event on this buffer. When data is ready to be read (or the
+        buffer has been closed), the event will be set. When no data is
+        ready, the event will be cleared.
+
+        :param threading.Event event: the event to set/clear
+        """
+        self._lock.acquire()
+        try:
+            self._event = event
+            # Make sure the event starts in `set` state if we appear to already
+            # be closed; otherwise, if we start in `clear` state & are closed,
+            # nothing will ever call `.feed` and the event (& OS pipe, if we're
+            # wrapping one - see `Channel.fileno`) will permanently stay in
+            # `clear`, causing deadlock if e.g. `select`ed upon.
+            if self._closed or len(self._buffer) > 0:
+                event.set()
+            else:
+                event.clear()
+        finally:
+            self._lock.release()
+
+    def feed(self, data):
+        """
+        Feed new data into this pipe. This method is assumed to be called
+        from a separate thread, so synchronization is done.
+
+        :param data: the data to add, as a ``str`` or ``bytes``
+        """
+        self._lock.acquire()
+        try:
+            if self._event is not None:
+                self._event.set()
+            self._buffer_frombytes(b(data))
+            # Wake any reader blocked in read().
+            self._cv.notify_all()
+        finally:
+            self._lock.release()
+
+    def read_ready(self):
+        """
+        Returns true if data is buffered and ready to be read from this
+        feeder. A ``False`` result does not mean that the feeder has closed;
+        it means you may need to wait before more data arrives.
+
+        :return:
+            ``True`` if a `read` call would immediately return at least one
+            byte; ``False`` otherwise.
+        """
+        self._lock.acquire()
+        try:
+            if len(self._buffer) == 0:
+                return False
+            return True
+        finally:
+            self._lock.release()
+
+    def read(self, nbytes, timeout=None):
+        """
+        Read data from the pipe. The return value is a string representing
+        the data received. The maximum amount of data to be received at once
+        is specified by ``nbytes``. If a string of length zero is returned,
+        the pipe has been closed.
+
+        The optional ``timeout`` argument can be a nonnegative float expressing
+        seconds, or ``None`` for no timeout. If a float is given, a
+        `.PipeTimeout` will be raised if the timeout period value has elapsed
+        before any data arrives.
+
+        :param int nbytes: maximum number of bytes to read
+        :param float timeout:
+            maximum seconds to wait (or ``None``, the default, to wait forever)
+        :return: the read data, as a ``str`` or ``bytes``
+
+        :raises:
+            `.PipeTimeout` -- if a timeout was specified and no data was ready
+            before that timeout
+        """
+        out = bytes()
+        self._lock.acquire()
+        try:
+            if len(self._buffer) == 0:
+                if self._closed:
+                    return out
+                # should we block?
+                if timeout == 0.0:
+                    raise PipeTimeout()
+                # loop here in case we get woken up but a different thread has
+                # grabbed everything in the buffer.
+                while (len(self._buffer) == 0) and not self._closed:
+                    then = time.time()
+                    self._cv.wait(timeout)
+                    if timeout is not None:
+                        # Shrink the remaining budget after each (possibly
+                        # spurious) wakeup.
+                        timeout -= time.time() - then
+                        if timeout <= 0.0:
+                            raise PipeTimeout()
+
+            # something's in the buffer and we have the lock!
+            if len(self._buffer) <= nbytes:
+                out = self._buffer_tobytes()
+                del self._buffer[:]
+                if (self._event is not None) and not self._closed:
+                    self._event.clear()
+            else:
+                out = self._buffer_tobytes(nbytes)
+                del self._buffer[:nbytes]
+        finally:
+            self._lock.release()
+
+        return out
+
+    def empty(self):
+        """
+        Clear out the buffer and return all data that was in it.
+
+        :return:
+            any data that was in the buffer prior to clearing it out, as a
+            `str`
+        """
+        self._lock.acquire()
+        try:
+            out = self._buffer_tobytes()
+            del self._buffer[:]
+            if (self._event is not None) and not self._closed:
+                self._event.clear()
+            return out
+        finally:
+            self._lock.release()
+
+    def close(self):
+        """
+        Close this pipe object. Future calls to `read` after the buffer
+        has been emptied will return immediately with an empty string.
+        """
+        self._lock.acquire()
+        try:
+            self._closed = True
+            # Wake blocked readers and set the event: "closed" counts as
+            # readable (read returns b'').
+            self._cv.notify_all()
+            if self._event is not None:
+                self._event.set()
+        finally:
+            self._lock.release()
+
+    def __len__(self):
+        """
+        Return the number of bytes buffered.
+
+        :return: number (`int`) of bytes buffered
+        """
+        self._lock.acquire()
+        try:
+            return len(self._buffer)
+        finally:
+            self._lock.release()
diff --git a/lib/paramiko/channel.py b/lib/paramiko/channel.py
new file mode 100644
index 0000000..25326ca
--- /dev/null
+++ b/lib/paramiko/channel.py
@@ -0,0 +1,1390 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Abstraction for an SSH2 channel.
+"""
+
+import binascii
+import os
+import socket
+import time
+import threading
+
+from functools import wraps
+
+from paramiko import util
+from paramiko.common import (
+ cMSG_CHANNEL_REQUEST,
+ cMSG_CHANNEL_WINDOW_ADJUST,
+ cMSG_CHANNEL_DATA,
+ cMSG_CHANNEL_EXTENDED_DATA,
+ DEBUG,
+ ERROR,
+ cMSG_CHANNEL_SUCCESS,
+ cMSG_CHANNEL_FAILURE,
+ cMSG_CHANNEL_EOF,
+ cMSG_CHANNEL_CLOSE,
+)
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+from paramiko.file import BufferedFile
+from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
+from paramiko import pipe
+from paramiko.util import ClosingContextManager
+
+
def open_only(func):
    """
    Decorator for `.Channel` methods which performs an openness check.

    :raises:
        `.SSHException` -- If the wrapped method is called on an unopened
        `.Channel`.
    """

    @wraps(func)
    def _check(self, *args, **kwds):
        # The channel is usable only while it is active and neither side
        # has closed or sent/received EOF.
        is_open = (
            self.active
            and not self.closed
            and not self.eof_received
            and not self.eof_sent
        )
        if not is_open:
            raise SSHException("Channel is not open")
        return func(self, *args, **kwds)

    return _check
+
+
+class Channel(ClosingContextManager):
+ """
+ A secure tunnel across an SSH `.Transport`. A Channel is meant to behave
+ like a socket, and has an API that should be indistinguishable from the
+ Python socket API.
+
+ Because SSH2 has a windowing kind of flow control, if you stop reading data
+ from a Channel and its buffer fills up, the server will be unable to send
+ you any more data until you read some of it. (This won't affect other
+ channels on the same transport -- all channels on a single transport are
+ flow-controlled independently.) Similarly, if the server isn't reading
+ data you send, calls to `send` may block, unless you set a timeout. This
+ is exactly like a normal network socket, so it shouldn't be too surprising.
+
+ Instances of this class may be used as context managers.
+ """
+
+ def __init__(self, chanid):
+ """
+ Create a new channel. The channel is not associated with any
+ particular session or `.Transport` until the Transport attaches it.
+ Normally you would only call this method from the constructor of a
+ subclass of `.Channel`.
+
+ :param int chanid:
+ the ID of this channel, as passed by an existing `.Transport`.
+ """
+ #: Channel ID
+ self.chanid = chanid
+ #: Remote channel ID
+ self.remote_chanid = 0
+ #: `.Transport` managing this channel
+ self.transport = None
+ #: Whether the connection is presently active
+ self.active = False
+ self.eof_received = 0
+ self.eof_sent = 0
+ self.in_buffer = BufferedPipe()
+ self.in_stderr_buffer = BufferedPipe()
+ self.timeout = None
+ #: Whether the connection has been closed
+ self.closed = False
+ self.ultra_debug = False
+ self.lock = threading.Lock()
+ self.out_buffer_cv = threading.Condition(self.lock)
+ self.in_window_size = 0
+ self.out_window_size = 0
+ self.in_max_packet_size = 0
+ self.out_max_packet_size = 0
+ self.in_window_threshold = 0
+ self.in_window_sofar = 0
+ self.status_event = threading.Event()
+ self._name = str(chanid)
+ self.logger = util.get_logger("paramiko.transport")
+ self._pipe = None
+ self.event = threading.Event()
+ self.event_ready = False
+ self.combine_stderr = False
+ self.exit_status = -1
+ self.origin_addr = None
+
    def __del__(self):
        # Best-effort cleanup during garbage collection. close() may fail
        # here (e.g. if the interpreter is tearing down), and exceptions
        # escaping __del__ are unreportable anyway, so swallow everything.
        try:
            self.close()
        except:
            pass
+
+ def __repr__(self):
+ """
+ Return a string representation of this object, for debugging.
+ """
+ out = " 0:
+ out += " in-buffer={}".format(len(self.in_buffer))
+ out += " -> " + repr(self.transport)
+ out += ">"
+ return out
+
    @open_only
    def get_pty(
        self,
        term="vt100",
        width=80,
        height=24,
        width_pixels=0,
        height_pixels=0,
    ):
        """
        Request a pseudo-terminal from the server. This is usually used right
        after creating a client channel, to ask the server to provide some
        basic terminal semantics for a shell invoked with `invoke_shell`.
        It isn't necessary (or desirable) to call this method if you're going
        to execute a single command with `exec_command`.

        :param str term: the terminal type to emulate
            (for example, ``'vt100'``)
        :param int width: width (in characters) of the terminal screen
        :param int height: height (in characters) of the terminal screen
        :param int width_pixels: width (in pixels) of the terminal screen
        :param int height_pixels: height (in pixels) of the terminal screen

        :raises:
            `.SSHException` -- if the request was rejected or the channel was
            closed
        """
        # Build a "pty-req" channel request; the field order is fixed by the
        # SSH connection protocol (RFC 4254 sec. 6.2) and must not change.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("pty-req")
        m.add_boolean(True)
        m.add_string(term)
        m.add_int(width)
        m.add_int(height)
        m.add_int(width_pixels)
        m.add_int(height_pixels)
        # Empty "encoded terminal modes" blob: we request no special modes.
        m.add_string(bytes())
        # want_reply was True above, so block until the server answers;
        # _wait_for_event raises SSHException on rejection or channel close.
        self._event_pending()
        self.transport._send_user_message(m)
        self._wait_for_event()
+
    @open_only
    def invoke_shell(self):
        """
        Request an interactive shell session on this channel. If the server
        allows it, the channel will then be directly connected to the stdin,
        stdout, and stderr of the shell.

        Normally you would call `get_pty` before this, in which case the
        shell will operate through the pty, and the channel will be connected
        to the stdin and stdout of the pty.

        When the shell exits, the channel will be closed and can't be reused.
        You must open a new channel if you wish to open another shell.

        :raises:
            `.SSHException` -- if the request was rejected or the channel was
            closed
        """
        # "shell" channel request with want_reply=True (RFC 4254 sec. 6.5):
        # we block until the server accepts or rejects it.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("shell")
        m.add_boolean(True)
        self._event_pending()
        self.transport._send_user_message(m)
        self._wait_for_event()
+
    @open_only
    def exec_command(self, command):
        """
        Execute a command on the server. If the server allows it, the channel
        will then be directly connected to the stdin, stdout, and stderr of
        the command being executed.

        When the command finishes executing, the channel will be closed and
        can't be reused. You must open a new channel if you wish to execute
        another command.

        :param str command: a shell command to execute.

        :raises:
            `.SSHException` -- if the request was rejected or the channel was
            closed
        """
        # "exec" channel request carrying the command string
        # (RFC 4254 sec. 6.5); want_reply=True so we wait for the verdict.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("exec")
        m.add_boolean(True)
        m.add_string(command)
        self._event_pending()
        self.transport._send_user_message(m)
        self._wait_for_event()
+
    @open_only
    def invoke_subsystem(self, subsystem):
        """
        Request a subsystem on the server (for example, ``sftp``). If the
        server allows it, the channel will then be directly connected to the
        requested subsystem.

        When the subsystem finishes, the channel will be closed and can't be
        reused.

        :param str subsystem: name of the subsystem being requested.

        :raises:
            `.SSHException` -- if the request was rejected or the channel was
            closed
        """
        # "subsystem" channel request (RFC 4254 sec. 6.5); want_reply=True,
        # so block until the server accepts or rejects it.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("subsystem")
        m.add_boolean(True)
        m.add_string(subsystem)
        self._event_pending()
        self.transport._send_user_message(m)
        self._wait_for_event()
+
    @open_only
    def resize_pty(self, width=80, height=24, width_pixels=0, height_pixels=0):
        """
        Resize the pseudo-terminal. This can be used to change the width and
        height of the terminal emulation created in a previous `get_pty` call.

        :param int width: new width (in characters) of the terminal screen
        :param int height: new height (in characters) of the terminal screen
        :param int width_pixels: new width (in pixels) of the terminal screen
        :param int height_pixels: new height (in pixels) of the terminal screen

        :raises:
            `.SSHException` -- if the request was rejected or the channel was
            closed
        """
        # "window-change" request (RFC 4254 sec. 6.7). want_reply is False,
        # so no reply is expected and we do not wait for one.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("window-change")
        m.add_boolean(False)
        m.add_int(width)
        m.add_int(height)
        m.add_int(width_pixels)
        m.add_int(height_pixels)
        self.transport._send_user_message(m)
+
+ @open_only
+ def update_environment(self, environment):
+ """
+ Updates this channel's remote shell environment.
+
+ .. note::
+ This operation is additive - i.e. the current environment is not
+ reset before the given environment variables are set.
+
+ .. warning::
+ Servers may silently reject some environment variables; see the
+ warning in `set_environment_variable` for details.
+
+ :param dict environment:
+ a dictionary containing the name and respective values to set
+ :raises:
+ `.SSHException` -- if any of the environment variables was rejected
+ by the server or the channel was closed
+ """
+ for name, value in environment.items():
+ try:
+ self.set_environment_variable(name, value)
+ except SSHException as e:
+ err = 'Failed to set environment variable "{}".'
+ raise SSHException(err.format(name), e)
+
    @open_only
    def set_environment_variable(self, name, value):
        """
        Set the value of an environment variable.

        .. warning::
            The server may reject this request depending on its ``AcceptEnv``
            setting; such rejections will fail silently (which is common client
            practice for this particular request type). Make sure you
            understand your server's configuration before using!

        :param str name: name of the environment variable
        :param str value: value of the environment variable

        :raises:
            `.SSHException` -- if the request was rejected or the channel was
            closed
        """
        # "env" channel request (RFC 4254 sec. 6.4). want_reply is False,
        # which is why server-side rejections are silent.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("env")
        m.add_boolean(False)
        m.add_string(name)
        m.add_string(value)
        self.transport._send_user_message(m)
+
+ def exit_status_ready(self):
+ """
+ Return true if the remote process has exited and returned an exit
+ status. You may use this to poll the process status if you don't
+ want to block in `recv_exit_status`. Note that the server may not
+ return an exit status in some cases (like bad servers).
+
+ :return:
+ ``True`` if `recv_exit_status` will return immediately, else
+ ``False``.
+
+ .. versionadded:: 1.7.3
+ """
+ return self.closed or self.status_event.is_set()
+
+ def recv_exit_status(self):
+ """
+ Return the exit status from the process on the server. This is
+ mostly useful for retrieving the results of an `exec_command`.
+ If the command hasn't finished yet, this method will wait until
+ it does, or until the channel is closed. If no exit status is
+ provided by the server, -1 is returned.
+
+ .. warning::
+ In some situations, receiving remote output larger than the current
+ `.Transport` or session's ``window_size`` (e.g. that set by the
+ ``default_window_size`` kwarg for `.Transport.__init__`) will cause
+ `.recv_exit_status` to hang indefinitely if it is called prior to a
+ sufficiently large `.Channel.recv` (or if there are no threads
+ calling `.Channel.recv` in the background).
+
+ In these cases, ensuring that `.recv_exit_status` is called *after*
+ `.Channel.recv` (or, again, using threads) can avoid the hang.
+
+ :return: the exit code (as an `int`) of the process on the server.
+
+ .. versionadded:: 1.2
+ """
+ self.status_event.wait()
+ assert self.status_event.is_set()
+ return self.exit_status
+
    def send_exit_status(self, status):
        """
        Send the exit status of an executed command to the client. (This
        really only makes sense in server mode.) Many clients expect to
        get some sort of status code back from an executed command after
        it completes.

        :param int status: the exit code of the process

        .. versionadded:: 1.2
        """
        # in many cases, the channel will not still be open here.
        # that's fine.
        # "exit-status" channel request (RFC 4254 sec. 6.10); want_reply is
        # False, so no confirmation is expected from the peer.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("exit-status")
        m.add_boolean(False)
        m.add_int(status)
        self.transport._send_user_message(m)
+
    @open_only
    def request_x11(
        self,
        screen_number=0,
        auth_protocol=None,
        auth_cookie=None,
        single_connection=False,
        handler=None,
    ):
        """
        Request an x11 session on this channel. If the server allows it,
        further x11 requests can be made from the server to the client,
        when an x11 application is run in a shell session.

        From :rfc:`4254`::

            It is RECOMMENDED that the 'x11 authentication cookie' that is
            sent be a fake, random cookie, and that the cookie be checked and
            replaced by the real cookie when a connection request is received.

        If you omit the auth_cookie, a new secure random 128-bit value will be
        generated, used, and returned. You will need to use this value to
        verify incoming x11 requests and replace them with the actual local
        x11 cookie (which requires some knowledge of the x11 protocol).

        If a handler is passed in, the handler is called from another thread
        whenever a new x11 connection arrives. The default handler queues up
        incoming x11 connections, which may be retrieved using
        `.Transport.accept`. The handler's calling signature is::

            handler(channel: Channel, (address: str, port: int))

        :param int screen_number: the x11 screen number (0, 10, etc.)
        :param str auth_protocol:
            the name of the X11 authentication method used; if none is given,
            ``"MIT-MAGIC-COOKIE-1"`` is used
        :param str auth_cookie:
            hexadecimal string containing the x11 auth cookie; if none is
            given, a secure random 128-bit value is generated
        :param bool single_connection:
            if True, only a single x11 connection will be forwarded (by
            default, any number of x11 connections can arrive over this
            session)
        :param handler:
            an optional callable handler to use for incoming X11 connections
        :return: the auth_cookie used
        """
        if auth_protocol is None:
            auth_protocol = "MIT-MAGIC-COOKIE-1"
        if auth_cookie is None:
            # 16 random bytes hexlified -> 128-bit cookie as a bytes object.
            auth_cookie = binascii.hexlify(os.urandom(16))

        # "x11-req" channel request (RFC 4254 sec. 6.3.1); field order is
        # protocol-defined. want_reply=True, so block for the verdict below.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("x11-req")
        m.add_boolean(True)
        m.add_boolean(single_connection)
        m.add_string(auth_protocol)
        m.add_string(auth_cookie)
        m.add_int(screen_number)
        self._event_pending()
        self.transport._send_user_message(m)
        self._wait_for_event()
        # Only register the handler once the server has accepted the request.
        self.transport._set_x11_handler(handler)
        return auth_cookie
+
    @open_only
    def request_forward_agent(self, handler):
        """
        Request for a forward SSH Agent on this channel.
        This is only valid for an ssh-agent from OpenSSH !!!

        :param handler:
            a required callable handler to use for incoming SSH Agent
            connections

        :return: True if we are ok, else False
            (at that time we always return ok)

        :raises: SSHException in case of channel problem.
        """
        # OpenSSH-specific request type; want_reply is False, so the server
        # never confirms and we optimistically return True.
        m = Message()
        m.add_byte(cMSG_CHANNEL_REQUEST)
        m.add_int(self.remote_chanid)
        m.add_string("auth-agent-req@openssh.com")
        m.add_boolean(False)
        self.transport._send_user_message(m)
        self.transport._set_forward_agent_handler(handler)
        return True
+
+ def get_transport(self):
+ """
+ Return the `.Transport` associated with this channel.
+ """
+ return self.transport
+
+ def set_name(self, name):
+ """
+ Set a name for this channel. Currently it's only used to set the name
+ of the channel in logfile entries. The name can be fetched with the
+ `get_name` method.
+
+ :param str name: new channel name
+ """
+ self._name = name
+
+ def get_name(self):
+ """
+ Get the name of this channel that was previously set by `set_name`.
+ """
+ return self._name
+
+ def get_id(self):
+ """
+ Return the `int` ID # for this channel.
+
+ The channel ID is unique across a `.Transport` and usually a small
+ number. It's also the number passed to
+ `.ServerInterface.check_channel_request` when determining whether to
+ accept a channel request in server mode.
+ """
+ return self.chanid
+
+ def set_combine_stderr(self, combine):
+ """
+ Set whether stderr should be combined into stdout on this channel.
+ The default is ``False``, but in some cases it may be convenient to
+ have both streams combined.
+
+ If this is ``False``, and `exec_command` is called (or ``invoke_shell``
+ with no pty), output to stderr will not show up through the `recv`
+ and `recv_ready` calls. You will have to use `recv_stderr` and
+ `recv_stderr_ready` to get stderr output.
+
+ If this is ``True``, data will never show up via `recv_stderr` or
+ `recv_stderr_ready`.
+
+ :param bool combine:
+ ``True`` if stderr output should be combined into stdout on this
+ channel.
+ :return: the previous setting (a `bool`).
+
+ .. versionadded:: 1.1
+ """
+ data = bytes()
+ self.lock.acquire()
+ try:
+ old = self.combine_stderr
+ self.combine_stderr = combine
+ if combine and not old:
+ # copy old stderr buffer into primary buffer
+ data = self.in_stderr_buffer.empty()
+ finally:
+ self.lock.release()
+ if len(data) > 0:
+ self._feed(data)
+ return old
+
+ # ...socket API...
+
+ def settimeout(self, timeout):
+ """
+ Set a timeout on blocking read/write operations. The ``timeout``
+ argument can be a nonnegative float expressing seconds, or ``None``.
+ If a float is given, subsequent channel read/write operations will
+ raise a timeout exception if the timeout period value has elapsed
+ before the operation has completed. Setting a timeout of ``None``
+ disables timeouts on socket operations.
+
+ ``chan.settimeout(0.0)`` is equivalent to ``chan.setblocking(0)``;
+ ``chan.settimeout(None)`` is equivalent to ``chan.setblocking(1)``.
+
+ :param float timeout:
+ seconds to wait for a pending read/write operation before raising
+ ``socket.timeout``, or ``None`` for no timeout.
+ """
+ self.timeout = timeout
+
+ def gettimeout(self):
+ """
+ Returns the timeout in seconds (as a float) associated with socket
+ operations, or ``None`` if no timeout is set. This reflects the last
+ call to `setblocking` or `settimeout`.
+ """
+ return self.timeout
+
+ def setblocking(self, blocking):
+ """
+ Set blocking or non-blocking mode of the channel: if ``blocking`` is 0,
+ the channel is set to non-blocking mode; otherwise it's set to blocking
+ mode. Initially all channels are in blocking mode.
+
+ In non-blocking mode, if a `recv` call doesn't find any data, or if a
+ `send` call can't immediately dispose of the data, an error exception
+ is raised. In blocking mode, the calls block until they can proceed. An
+ EOF condition is considered "immediate data" for `recv`, so if the
+ channel is closed in the read direction, it will never block.
+
+ ``chan.setblocking(0)`` is equivalent to ``chan.settimeout(0)``;
+ ``chan.setblocking(1)`` is equivalent to ``chan.settimeout(None)``.
+
+ :param int blocking:
+ 0 to set non-blocking mode; non-0 to set blocking mode.
+ """
+ if blocking:
+ self.settimeout(None)
+ else:
+ self.settimeout(0.0)
+
+ def getpeername(self):
+ """
+ Return the address of the remote side of this Channel, if possible.
+
+ This simply wraps `.Transport.getpeername`, used to provide enough of a
+ socket-like interface to allow asyncore to work. (asyncore likes to
+ call ``'getpeername'``.)
+ """
+ return self.transport.getpeername()
+
    def close(self):
        """
        Close the channel. All future read/write operations on the channel
        will fail. The remote end will receive no more data (after queued data
        is flushed). Channels are automatically closed when their `.Transport`
        is closed or when they are garbage collected.
        """
        self.lock.acquire()
        try:
            # only close the pipe when the user explicitly closes the channel.
            # otherwise they will get unpleasant surprises. (and do it before
            # checking self.closed, since the remote host may have already
            # closed the connection.)
            if self._pipe is not None:
                self._pipe.close()
                self._pipe = None

            if not self.active or self.closed:
                return
            msgs = self._close_internal()
        finally:
            self.lock.release()
        # Send the generated EOF/close messages only after releasing our
        # lock, so the transport can take its own locks without deadlocking.
        # (On the early return above, this loop is never reached.)
        for m in msgs:
            if m is not None:
                self.transport._send_user_message(m)
+
+ def recv_ready(self):
+ """
+ Returns true if data is buffered and ready to be read from this
+ channel. A ``False`` result does not mean that the channel has closed;
+ it means you may need to wait before more data arrives.
+
+ :return:
+ ``True`` if a `recv` call on this channel would immediately return
+ at least one byte; ``False`` otherwise.
+ """
+ return self.in_buffer.read_ready()
+
+ def recv(self, nbytes):
+ """
+ Receive data from the channel. The return value is a string
+ representing the data received. The maximum amount of data to be
+ received at once is specified by ``nbytes``. If a string of
+ length zero is returned, the channel stream has closed.
+
+ :param int nbytes: maximum number of bytes to read.
+ :return: received data, as a `bytes`.
+
+ :raises socket.timeout:
+ if no data is ready before the timeout set by `settimeout`.
+ """
+ try:
+ out = self.in_buffer.read(nbytes, self.timeout)
+ except PipeTimeout:
+ raise socket.timeout()
+
+ ack = self._check_add_window(len(out))
+ # no need to hold the channel lock when sending this
+ if ack > 0:
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
+ m.add_int(self.remote_chanid)
+ m.add_int(ack)
+ self.transport._send_user_message(m)
+
+ return out
+
+ def recv_stderr_ready(self):
+ """
+ Returns true if data is buffered and ready to be read from this
+ channel's stderr stream. Only channels using `exec_command` or
+ `invoke_shell` without a pty will ever have data on the stderr
+ stream.
+
+ :return:
+ ``True`` if a `recv_stderr` call on this channel would immediately
+ return at least one byte; ``False`` otherwise.
+
+ .. versionadded:: 1.1
+ """
+ return self.in_stderr_buffer.read_ready()
+
+ def recv_stderr(self, nbytes):
+ """
+ Receive data from the channel's stderr stream. Only channels using
+ `exec_command` or `invoke_shell` without a pty will ever have data
+ on the stderr stream. The return value is a string representing the
+ data received. The maximum amount of data to be received at once is
+ specified by ``nbytes``. If a string of length zero is returned, the
+ channel stream has closed.
+
+ :param int nbytes: maximum number of bytes to read.
+ :return: received data as a `bytes`
+
+ :raises socket.timeout: if no data is ready before the timeout set by
+ `settimeout`.
+
+ .. versionadded:: 1.1
+ """
+ try:
+ out = self.in_stderr_buffer.read(nbytes, self.timeout)
+ except PipeTimeout:
+ raise socket.timeout()
+
+ ack = self._check_add_window(len(out))
+ # no need to hold the channel lock when sending this
+ if ack > 0:
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_WINDOW_ADJUST)
+ m.add_int(self.remote_chanid)
+ m.add_int(ack)
+ self.transport._send_user_message(m)
+
+ return out
+
+ def send_ready(self):
+ """
+ Returns true if data can be written to this channel without blocking.
+ This means the channel is either closed (so any write attempt would
+ return immediately) or there is at least one byte of space in the
+ outbound buffer. If there is at least one byte of space in the
+ outbound buffer, a `send` call will succeed immediately and return
+ the number of bytes actually written.
+
+ :return:
+ ``True`` if a `send` call on this channel would immediately succeed
+ or fail
+ """
+ self.lock.acquire()
+ try:
+ if self.closed or self.eof_sent:
+ return True
+ return self.out_window_size > 0
+ finally:
+ self.lock.release()
+
    def send(self, s):
        """
        Send data to the channel. Returns the number of bytes sent, or 0 if
        the channel stream is closed. Applications are responsible for
        checking that all data has been sent: if only some of the data was
        transmitted, the application needs to attempt delivery of the remaining
        data.

        :param bytes s: data to send
        :return: number of bytes actually sent, as an `int`

        :raises socket.timeout: if no data could be sent before the timeout set
            by `settimeout`.
        """

        # CHANNEL_DATA header (RFC 4254 sec. 5.2); _send attaches the payload
        # and, per the partial-send contract above, may accept only a prefix.
        m = Message()
        m.add_byte(cMSG_CHANNEL_DATA)
        m.add_int(self.remote_chanid)
        return self._send(s, m)
+
    def send_stderr(self, s):
        """
        Send data to the channel on the "stderr" stream. This is normally
        only used by servers to send output from shell commands -- clients
        won't use this. Returns the number of bytes sent, or 0 if the channel
        stream is closed. Applications are responsible for checking that all
        data has been sent: if only some of the data was transmitted, the
        application needs to attempt delivery of the remaining data.

        :param bytes s: data to send.
        :return: number of bytes actually sent, as an `int`.

        :raises socket.timeout:
            if no data could be sent before the timeout set by `settimeout`.

        .. versionadded:: 1.1
        """

        # CHANNEL_EXTENDED_DATA with type code 1 = stderr (RFC 4254 sec. 5.2,
        # the same code _feed_extended accepts on the inbound side).
        m = Message()
        m.add_byte(cMSG_CHANNEL_EXTENDED_DATA)
        m.add_int(self.remote_chanid)
        m.add_int(1)
        return self._send(s, m)
+
+ def sendall(self, s):
+ """
+ Send data to the channel, without allowing partial results. Unlike
+ `send`, this method continues to send data from the given string until
+ either all data has been sent or an error occurs. Nothing is returned.
+
+ :param bytes s: data to send.
+
+ :raises socket.timeout:
+ if sending stalled for longer than the timeout set by `settimeout`.
+ :raises socket.error:
+ if an error occurred before the entire string was sent.
+
+ .. note::
+ If the channel is closed while only part of the data has been
+ sent, there is no way to determine how much data (if any) was sent.
+ This is irritating, but identically follows Python's API.
+ """
+ while s:
+ sent = self.send(s)
+ s = s[sent:]
+ return None
+
+ def sendall_stderr(self, s):
+ """
+ Send data to the channel's "stderr" stream, without allowing partial
+ results. Unlike `send_stderr`, this method continues to send data
+ from the given bytestring until all data has been sent or an error
+ occurs. Nothing is returned.
+
+ :param bytes s: data to send to the client as "stderr" output.
+
+ :raises socket.timeout:
+ if sending stalled for longer than the timeout set by `settimeout`.
+ :raises socket.error:
+ if an error occurred before the entire string was sent.
+
+ .. versionadded:: 1.1
+ """
+ while s:
+ sent = self.send_stderr(s)
+ s = s[sent:]
+ return None
+
+ def makefile(self, *params):
+ """
+ Return a file-like object associated with this channel. The optional
+ ``mode`` and ``bufsize`` arguments are interpreted the same way as by
+ the built-in ``file()`` function in Python.
+
+ :return: `.ChannelFile` object which can be used for Python file I/O.
+ """
+ return ChannelFile(*([self] + list(params)))
+
+ def makefile_stderr(self, *params):
+ """
+ Return a file-like object associated with this channel's stderr
+ stream. Only channels using `exec_command` or `invoke_shell`
+ without a pty will ever have data on the stderr stream.
+
+ The optional ``mode`` and ``bufsize`` arguments are interpreted the
+ same way as by the built-in ``file()`` function in Python. For a
+ client, it only makes sense to open this file for reading. For a
+ server, it only makes sense to open this file for writing.
+
+ :returns:
+ `.ChannelStderrFile` object which can be used for Python file I/O.
+
+ .. versionadded:: 1.1
+ """
+ return ChannelStderrFile(*([self] + list(params)))
+
+ def makefile_stdin(self, *params):
+ """
+ Return a file-like object associated with this channel's stdin
+ stream.
+
+ The optional ``mode`` and ``bufsize`` arguments are interpreted the
+ same way as by the built-in ``file()`` function in Python. For a
+ client, it only makes sense to open this file for writing. For a
+ server, it only makes sense to open this file for reading.
+
+ :returns:
+ `.ChannelStdinFile` object which can be used for Python file I/O.
+
+ .. versionadded:: 2.6
+ """
+ return ChannelStdinFile(*([self] + list(params)))
+
    def fileno(self):
        """
        Returns an OS-level file descriptor which can be used for polling, but
        but not for reading or writing. This is primarily to allow Python's
        ``select`` module to work.

        The first time ``fileno`` is called on a channel, a pipe is created to
        simulate real OS-level file descriptor (FD) behavior. Because of this,
        two OS-level FDs are created, which will use up FDs faster than normal.
        (You won't notice this effect unless you have hundreds of channels
        open at the same time.)

        :return: an OS-level file descriptor (`int`)

        .. warning::
            This method causes channel reads to be slightly less efficient.
        """
        self.lock.acquire()
        try:
            # The pipe is created lazily and reused on every later call.
            if self._pipe is not None:
                return self._pipe.fileno()
            # create the pipe and feed in any existing data
            self._pipe = pipe.make_pipe()
            # Hook both the stdout and stderr buffers up to the pipe so data
            # on either one makes the descriptor readable (presumably OR
            # semantics -- see pipe.make_or_pipe).
            p1, p2 = pipe.make_or_pipe(self._pipe)
            self.in_buffer.set_event(p1)
            self.in_stderr_buffer.set_event(p2)
            return self._pipe.fileno()
        finally:
            self.lock.release()
+
+ def shutdown(self, how):
+ """
+ Shut down one or both halves of the connection. If ``how`` is 0,
+ further receives are disallowed. If ``how`` is 1, further sends
+ are disallowed. If ``how`` is 2, further sends and receives are
+ disallowed. This closes the stream in one or both directions.
+
+ :param int how:
+ 0 (stop receiving), 1 (stop sending), or 2 (stop receiving and
+ sending).
+ """
+ if (how == 0) or (how == 2):
+ # feign "read" shutdown
+ self.eof_received = 1
+ if (how == 1) or (how == 2):
+ self.lock.acquire()
+ try:
+ m = self._send_eof()
+ finally:
+ self.lock.release()
+ if m is not None and self.transport is not None:
+ self.transport._send_user_message(m)
+
+ def shutdown_read(self):
+ """
+ Shutdown the receiving side of this socket, closing the stream in
+ the incoming direction. After this call, future reads on this
+ channel will fail instantly. This is a convenience method, equivalent
+ to ``shutdown(0)``, for people who don't make it a habit to
+ memorize unix constants from the 1970s.
+
+ .. versionadded:: 1.2
+ """
+ self.shutdown(0)
+
+ def shutdown_write(self):
+ """
+ Shutdown the sending side of this socket, closing the stream in
+ the outgoing direction. After this call, future writes on this
+ channel will fail instantly. This is a convenience method, equivalent
+ to ``shutdown(1)``, for people who don't make it a habit to
+ memorize unix constants from the 1970s.
+
+ .. versionadded:: 1.2
+ """
+ self.shutdown(1)
+
    @property
    def _closed(self):
        # Concession to Python 3's socket API, which has a private ._closed
        # attribute instead of a semipublic .closed attribute.
        # (Read-only alias; there is deliberately no setter.)
        return self.closed
+
+ # ...calls from Transport
+
    def _set_transport(self, transport):
        # Called by the Transport when it adopts this channel: record the
        # owning transport and redirect logging to its log channel.
        self.transport = transport
        self.logger = util.get_logger(self.transport.get_log_channel())
+
    def _set_window(self, window_size, max_packet_size):
        # Initialize inbound (receive-side) flow-control state.
        self.in_window_size = window_size
        self.in_max_packet_size = max_packet_size
        # threshold of bytes we receive before we bother to send
        # a window update (10% of the window)
        self.in_window_threshold = window_size // 10
        self.in_window_sofar = 0
        self._log(DEBUG, "Max packet in: {} bytes".format(max_packet_size))
+
+ def _set_remote_channel(self, chanid, window_size, max_packet_size):
+ self.remote_chanid = chanid
+ self.out_window_size = window_size
+ self.out_max_packet_size = self.transport._sanitize_packet_size(
+ max_packet_size
+ )
+ self.active = 1
+ self._log(
+ DEBUG, "Max packet out: {} bytes".format(self.out_max_packet_size)
+ )
+
    def _request_success(self, m):
        # The server accepted our last channel request: mark the event as
        # answered and wake the thread blocked in _wait_for_event().
        # ``m`` (the CHANNEL_SUCCESS message) carries no payload we need.
        self._log(DEBUG, "Sesch channel {} request ok".format(self.chanid))
        self.event_ready = True
        self.event.set()
        return
+
+ def _request_failed(self, m):
+ self.lock.acquire()
+ try:
+ msgs = self._close_internal()
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ def _feed(self, m):
+ if isinstance(m, bytes):
+ # passed from _feed_extended
+ s = m
+ else:
+ s = m.get_binary()
+ self.in_buffer.feed(s)
+
+ def _feed_extended(self, m):
+ code = m.get_int()
+ s = m.get_binary()
+ if code != 1:
+ self._log(
+ ERROR, "unknown extended_data type {}; discarding".format(code)
+ )
+ return
+ if self.combine_stderr:
+ self._feed(s)
+ else:
+ self.in_stderr_buffer.feed(s)
+
+ def _window_adjust(self, m):
+ nbytes = m.get_int()
+ self.lock.acquire()
+ try:
+ if self.ultra_debug:
+ self._log(DEBUG, "window up {}".format(nbytes))
+ self.out_window_size += nbytes
+ self.out_buffer_cv.notify_all()
+ finally:
+ self.lock.release()
+
    def _handle_request(self, m):
        """Dispatch an incoming MSG_CHANNEL_REQUEST to the server interface.

        Parses the request type plus its type-specific payload, asks the
        transport's ``server_object`` (if any) whether to grant it, and —
        when the peer asked for a reply — answers with CHANNEL_SUCCESS or
        CHANNEL_FAILURE.
        """
        key = m.get_text()
        want_reply = m.get_boolean()
        server = self.transport.server_object
        ok = False
        if key == "exit-status":
            # Remote command finished: record status, wake recv_exit_status.
            self.exit_status = m.get_int()
            self.status_event.set()
            ok = True
        elif key == "xon-xoff":
            # ignore
            ok = True
        elif key == "pty-req":
            term = m.get_string()
            width = m.get_int()
            height = m.get_int()
            pixelwidth = m.get_int()
            pixelheight = m.get_int()
            modes = m.get_string()
            # Without a server object we can't grant any session request.
            if server is None:
                ok = False
            else:
                ok = server.check_channel_pty_request(
                    self, term, width, height, pixelwidth, pixelheight, modes
                )
        elif key == "shell":
            if server is None:
                ok = False
            else:
                ok = server.check_channel_shell_request(self)
        elif key == "env":
            name = m.get_string()
            value = m.get_string()
            if server is None:
                ok = False
            else:
                ok = server.check_channel_env_request(self, name, value)
        elif key == "exec":
            cmd = m.get_string()
            if server is None:
                ok = False
            else:
                ok = server.check_channel_exec_request(self, cmd)
        elif key == "subsystem":
            name = m.get_text()
            if server is None:
                ok = False
            else:
                ok = server.check_channel_subsystem_request(self, name)
        elif key == "window-change":
            width = m.get_int()
            height = m.get_int()
            pixelwidth = m.get_int()
            pixelheight = m.get_int()
            if server is None:
                ok = False
            else:
                ok = server.check_channel_window_change_request(
                    self, width, height, pixelwidth, pixelheight
                )
        elif key == "x11-req":
            single_connection = m.get_boolean()
            auth_proto = m.get_text()
            auth_cookie = m.get_binary()
            screen_number = m.get_int()
            if server is None:
                ok = False
            else:
                ok = server.check_channel_x11_request(
                    self,
                    single_connection,
                    auth_proto,
                    auth_cookie,
                    screen_number,
                )
        elif key == "auth-agent-req@openssh.com":
            if server is None:
                ok = False
            else:
                ok = server.check_channel_forward_agent_request(self)
        else:
            self._log(DEBUG, 'Unhandled channel request "{}"'.format(key))
            ok = False
        if want_reply:
            # Re-using the name `m` is fine: the request payload above has
            # been fully consumed by this point.
            m = Message()
            if ok:
                m.add_byte(cMSG_CHANNEL_SUCCESS)
            else:
                m.add_byte(cMSG_CHANNEL_FAILURE)
            m.add_int(self.remote_chanid)
            self.transport._send_user_message(m)
+
+ def _handle_eof(self, m):
+ self.lock.acquire()
+ try:
+ if not self.eof_received:
+ self.eof_received = True
+ self.in_buffer.close()
+ self.in_stderr_buffer.close()
+ if self._pipe is not None:
+ self._pipe.set_forever()
+ finally:
+ self.lock.release()
+ self._log(DEBUG, "EOF received ({})".format(self._name))
+
+ def _handle_close(self, m):
+ self.lock.acquire()
+ try:
+ msgs = self._close_internal()
+ self.transport._unlink_channel(self.chanid)
+ finally:
+ self.lock.release()
+ for m in msgs:
+ if m is not None:
+ self.transport._send_user_message(m)
+
+ # ...internals...
+
    def _send(self, s, m):
        """Reserve window space, append data to message *m*, and send it.

        Returns the number of bytes actually sent — possibly fewer than
        ``len(s)``, and 0 on EOF.  Raises ``socket.error`` if the channel is
        closed, or ``socket.timeout`` if the window never opens in time.
        """
        size = len(s)
        self.lock.acquire()
        try:
            if self.closed:
                # this doesn't seem useful, but it is the documented behavior
                # of Socket
                raise socket.error("Socket is closed")
            # May block (releasing the CV internally) and may shrink `size`.
            size = self._wait_for_send_window(size)
            if size == 0:
                # eof or similar
                return 0
            m.add_string(s[:size])
        finally:
            self.lock.release()
        # Note: We release self.lock before calling _send_user_message.
        # Otherwise, we can deadlock during re-keying.
        self.transport._send_user_message(m)
        return size
+
+ def _log(self, level, msg, *args):
+ self.logger.log(level, "[chan " + self._name + "] " + msg, *args)
+
    def _event_pending(self):
        # Arm the request event before sending a channel request, so a later
        # _wait_for_event blocks until success/failure actually arrives.
        self.event.clear()
        self.event_ready = False
+
    def _wait_for_event(self):
        """Block until the pending channel request completes; raise on failure."""
        self.event.wait()
        assert self.event.is_set()
        if self.event_ready:
            return
        # The event fired without success being recorded: surface the
        # transport's saved exception, or a generic one if none exists.
        e = self.transport.get_exception()
        if e is None:
            e = SSHException("Channel closed.")
        raise e
+
    def _set_closed(self):
        # you are holding the lock.
        self.closed = True
        # Unblock pending readers...
        self.in_buffer.close()
        self.in_stderr_buffer.close()
        # ...and writers waiting on window space.
        self.out_buffer_cv.notify_all()
        # Notify any waiters that we are closed
        self.event.set()
        self.status_event.set()
        if self._pipe is not None:
            self._pipe.set_forever()
+
+ def _send_eof(self):
+ # you are holding the lock.
+ if self.eof_sent:
+ return None
+ m = Message()
+ m.add_byte(cMSG_CHANNEL_EOF)
+ m.add_int(self.remote_chanid)
+ self.eof_sent = True
+ self._log(DEBUG, "EOF sent ({})".format(self._name))
+ return m
+
    def _close_internal(self):
        """Mark the channel closed and build the EOF/CLOSE messages.

        (You are already holding the lock.)  Returns
        ``(eof_msg_or_None, close_msg)``, or ``(None, None)`` if the channel
        never became active or is already closed.  The caller sends the
        messages after releasing the lock.
        """
        if not self.active or self.closed:
            return None, None
        m1 = self._send_eof()
        m2 = Message()
        m2.add_byte(cMSG_CHANNEL_CLOSE)
        m2.add_int(self.remote_chanid)
        self._set_closed()
        # can't unlink from the Transport yet -- the remote side may still
        # try to send meta-data (exit-status, etc)
        return m1, m2
+
+ def _unlink(self):
+ # server connection could die before we become active:
+ # still signal the close!
+ if self.closed:
+ return
+ self.lock.acquire()
+ try:
+ self._set_closed()
+ self.transport._unlink_channel(self.chanid)
+ finally:
+ self.lock.release()
+
    def _check_add_window(self, n):
        """Account for *n* consumed inbound bytes; return the window
        adjustment to send to the peer.

        Returns 0 while the running total stays at or below the 10%
        threshold (or if the channel is closed/EOF'd/inactive); otherwise
        returns the accumulated count and resets the accumulator.
        """
        self.lock.acquire()
        try:
            if self.closed or self.eof_received or not self.active:
                return 0
            if self.ultra_debug:
                self._log(DEBUG, "addwindow {}".format(n))
            self.in_window_sofar += n
            if self.in_window_sofar <= self.in_window_threshold:
                return 0
            if self.ultra_debug:
                self._log(
                    DEBUG, "addwindow send {}".format(self.in_window_sofar)
                )
            out = self.in_window_sofar
            self.in_window_sofar = 0
            return out
        finally:
            self.lock.release()
+
    def _wait_for_send_window(self, size):
        """
        (You are already holding the lock.)
        Wait for the send window to open up, and allocate up to ``size`` bytes
        for transmission. If no space opens up before the timeout, a timeout
        exception is raised. Returns the number of bytes available to send
        (may be less than requested).
        """
        # you are already holding the lock
        if self.closed or self.eof_sent:
            return 0
        if self.out_window_size == 0:
            # should we block?
            if self.timeout == 0.0:
                raise socket.timeout()
            # loop here in case we get woken up but a different thread has
            # filled the buffer
            timeout = self.timeout
            while self.out_window_size == 0:
                if self.closed or self.eof_sent:
                    return 0
                then = time.time()
                # CV wait releases self.lock while blocked; the channel
                # state may change before we re-acquire it.
                self.out_buffer_cv.wait(timeout)
                if timeout is not None:
                    # Count down the remaining budget across spurious wakeups.
                    timeout -= time.time() - then
                    if timeout <= 0.0:
                        raise socket.timeout()
        # we have some window to squeeze into
        if self.closed or self.eof_sent:
            return 0
        if self.out_window_size < size:
            size = self.out_window_size
        # Also cap at the peer's max packet size, less 64 bytes of headroom
        # for packet overhead.
        if self.out_max_packet_size - 64 < size:
            size = self.out_max_packet_size - 64
        self.out_window_size -= size
        if self.ultra_debug:
            self._log(DEBUG, "window down to {}".format(self.out_window_size))
        return size
+
+
class ChannelFile(BufferedFile):
    """
    A file-like wrapper around `.Channel`. A ChannelFile is created by
    calling `Channel.makefile`.

    .. warning::
        To correctly emulate the file object created from a socket's
        ``makefile`` method, a `.Channel` and its `.ChannelFile` should be
        able to be closed or garbage-collected independently. Currently,
        closing the `ChannelFile` does nothing but flush the buffer.
    """

    def __init__(self, channel, mode="r", bufsize=-1):
        # Keep a reference to the underlying channel; actual buffering is
        # handled by the BufferedFile machinery via _set_mode.
        self.channel = channel
        BufferedFile.__init__(self)
        self._set_mode(mode, bufsize)

    def __repr__(self):
        """
        Returns a string representation of this object, for debugging.
        """
        # Fixed: previously returned the empty string, which defeats the
        # stated debugging purpose.  Include the concrete class name (so
        # stderr/stdin subclasses identify themselves) and the channel.
        return "<paramiko.{} from {!r}>".format(
            type(self).__name__, self.channel
        )

    def _read(self, size):
        # Delegate raw reads to the channel's stdout stream.
        return self.channel.recv(size)

    def _write(self, data):
        # Delegate raw writes to the channel; report everything as written
        # since sendall only returns after the full payload is sent.
        self.channel.sendall(data)
        return len(data)
+
+
class ChannelStderrFile(ChannelFile):
    """
    A file-like wrapper around a `.Channel`'s stderr stream.

    See `Channel.makefile_stderr` for details.
    """

    def _read(self, size):
        # Raw reads come from the channel's stderr stream.
        return self.channel.recv_stderr(size)

    def _write(self, data):
        # sendall_stderr returns only after everything was sent, so the
        # whole payload counts as written.
        self.channel.sendall_stderr(data)
        return len(data)
+
+
class ChannelStdinFile(ChannelFile):
    """
    A file-like wrapper around a `.Channel`'s stdin.

    See `Channel.makefile_stdin` for details.
    """

    def close(self):
        # Flush/close the buffered file first, then signal EOF to the remote
        # process by shutting down the channel's write side.
        super().close()
        self.channel.shutdown_write()
diff --git a/lib/paramiko/client.py b/lib/paramiko/client.py
new file mode 100644
index 0000000..1f674a9
--- /dev/null
+++ b/lib/paramiko/client.py
@@ -0,0 +1,889 @@
+# Copyright (C) 2006-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+SSH client & key policies
+"""
+
+from binascii import hexlify
+import getpass
+import inspect
+import os
+import socket
+import warnings
+from errno import ECONNREFUSED, EHOSTUNREACH
+
+from paramiko.agent import Agent
+from paramiko.common import DEBUG
+from paramiko.config import SSH_PORT
+from paramiko.ecdsakey import ECDSAKey
+from paramiko.ed25519key import Ed25519Key
+from paramiko.hostkeys import HostKeys
+from paramiko.rsakey import RSAKey
+from paramiko.ssh_exception import (
+ SSHException,
+ BadHostKeyException,
+ NoValidConnectionsError,
+)
+from paramiko.transport import Transport
+from paramiko.util import ClosingContextManager
+
+
+class SSHClient(ClosingContextManager):
+ """
+ A high-level representation of a session with an SSH server. This class
+ wraps `.Transport`, `.Channel`, and `.SFTPClient` to take care of most
+ aspects of authenticating and opening channels. A typical use case is::
+
+ client = SSHClient()
+ client.load_system_host_keys()
+ client.connect('ssh.example.com')
+ stdin, stdout, stderr = client.exec_command('ls -l')
+
+ You may pass in explicit overrides for authentication and server host key
+ checking. The default mechanism is to try to use local key files or an
+ SSH agent (if one is running).
+
+ Instances of this class may be used as context managers.
+
+ .. versionadded:: 1.6
+ """
+
+ def __init__(self):
+ """
+ Create a new SSHClient.
+ """
+ self._system_host_keys = HostKeys()
+ self._host_keys = HostKeys()
+ self._host_keys_filename = None
+ self._log_channel = None
+ self._policy = RejectPolicy()
+ self._transport = None
+ self._agent = None
+
+ def load_system_host_keys(self, filename=None):
+ """
+ Load host keys from a system (read-only) file. Host keys read with
+ this method will not be saved back by `save_host_keys`.
+
+ This method can be called multiple times. Each new set of host keys
+ will be merged with the existing set (new replacing old if there are
+ conflicts).
+
+ If ``filename`` is left as ``None``, an attempt will be made to read
+ keys from the user's local "known hosts" file, as used by OpenSSH,
+ and no exception will be raised if the file can't be read. This is
+ probably only useful on posix.
+
+ :param str filename: the filename to read, or ``None``
+
+ :raises: ``IOError`` --
+ if a filename was provided and the file could not be read
+ """
+ if filename is None:
+ # try the user's .ssh key file, and mask exceptions
+ filename = os.path.expanduser("~/.ssh/known_hosts")
+ try:
+ self._system_host_keys.load(filename)
+ except IOError:
+ pass
+ return
+ self._system_host_keys.load(filename)
+
+ def load_host_keys(self, filename):
+ """
+ Load host keys from a local host-key file. Host keys read with this
+ method will be checked after keys loaded via `load_system_host_keys`,
+ but will be saved back by `save_host_keys` (so they can be modified).
+ The missing host key policy `.AutoAddPolicy` adds keys to this set and
+ saves them, when connecting to a previously-unknown server.
+
+ This method can be called multiple times. Each new set of host keys
+ will be merged with the existing set (new replacing old if there are
+ conflicts). When automatically saving, the last hostname is used.
+
+ :param str filename: the filename to read
+
+ :raises: ``IOError`` -- if the filename could not be read
+ """
+ self._host_keys_filename = filename
+ self._host_keys.load(filename)
+
+ def save_host_keys(self, filename):
+ """
+ Save the host keys back to a file. Only the host keys loaded with
+ `load_host_keys` (plus any added directly) will be saved -- not any
+ host keys loaded with `load_system_host_keys`.
+
+ :param str filename: the filename to save to
+
+ :raises: ``IOError`` -- if the file could not be written
+ """
+
+ # update local host keys from file (in case other SSH clients
+ # have written to the known_hosts file meanwhile.
+ if self._host_keys_filename is not None:
+ self.load_host_keys(self._host_keys_filename)
+
+ with open(filename, "w") as f:
+ for hostname, keys in self._host_keys.items():
+ for keytype, key in keys.items():
+ f.write(
+ "{} {} {}\n".format(
+ hostname, keytype, key.get_base64()
+ )
+ )
+
+ def get_host_keys(self):
+ """
+ Get the local `.HostKeys` object. This can be used to examine the
+ local host keys or change them.
+
+ :return: the local host keys as a `.HostKeys` object.
+ """
+ return self._host_keys
+
+ def set_log_channel(self, name):
+ """
+ Set the channel for logging. The default is ``"paramiko.transport"``
+ but it can be set to anything you want.
+
+ :param str name: new channel name for logging
+ """
+ self._log_channel = name
+
+ def set_missing_host_key_policy(self, policy):
+ """
+ Set policy to use when connecting to servers without a known host key.
+
+ Specifically:
+
+ * A **policy** is a "policy class" (or instance thereof), namely some
+ subclass of `.MissingHostKeyPolicy` such as `.RejectPolicy` (the
+ default), `.AutoAddPolicy`, `.WarningPolicy`, or a user-created
+ subclass.
+ * A host key is **known** when it appears in the client object's cached
+ host keys structures (those manipulated by `load_system_host_keys`
+ and/or `load_host_keys`).
+
+ :param .MissingHostKeyPolicy policy:
+ the policy to use when receiving a host key from a
+ previously-unknown server
+ """
+ if inspect.isclass(policy):
+ policy = policy()
+ self._policy = policy
+
+ def _families_and_addresses(self, hostname, port):
+ """
+ Yield pairs of address families and addresses to try for connecting.
+
+ :param str hostname: the server to connect to
+ :param int port: the server port to connect to
+ :returns: Yields an iterable of ``(family, address)`` tuples
+ """
+ guess = True
+ addrinfos = socket.getaddrinfo(
+ hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
+ )
+ for (family, socktype, proto, canonname, sockaddr) in addrinfos:
+ if socktype == socket.SOCK_STREAM:
+ yield family, sockaddr
+ guess = False
+
+ # some OS like AIX don't indicate SOCK_STREAM support, so just
+ # guess. :( We only do this if we did not get a single result marked
+ # as socktype == SOCK_STREAM.
+ if guess:
+ for family, _, _, _, sockaddr in addrinfos:
+ yield family, sockaddr
+
    def connect(
        self,
        hostname,
        port=SSH_PORT,
        username=None,
        password=None,
        pkey=None,
        key_filename=None,
        timeout=None,
        allow_agent=True,
        look_for_keys=True,
        compress=False,
        sock=None,
        gss_auth=False,
        gss_kex=False,
        gss_deleg_creds=True,
        gss_host=None,
        banner_timeout=None,
        auth_timeout=None,
        channel_timeout=None,
        gss_trust_dns=True,
        passphrase=None,
        disabled_algorithms=None,
        transport_factory=None,
        auth_strategy=None,
    ):
        """
        Connect to an SSH server and authenticate to it. The server's host key
        is checked against the system host keys (see `load_system_host_keys`)
        and any local host keys (`load_host_keys`). If the server's hostname
        is not found in either set of host keys, the missing host key policy
        is used (see `set_missing_host_key_policy`). The default policy is
        to reject the key and raise an `.SSHException`.

        Authentication is attempted in the following order of priority:

            - The ``pkey`` or ``key_filename`` passed in (if any)

              - ``key_filename`` may contain OpenSSH public certificate paths
                as well as regular private-key paths; when files ending in
                ``-cert.pub`` are found, they are assumed to match a private
                key, and both components will be loaded. (The private key
                itself does *not* need to be listed in ``key_filename`` for
                this to occur - *just* the certificate.)

            - Any key we can find through an SSH agent
            - Any ``id_*`` keys discoverable in ``~/.ssh/``

              - When OpenSSH-style public certificates exist that match an
                existing such private key (so e.g. one has ``id_rsa`` and
                ``id_rsa-cert.pub``) the certificate will be loaded alongside
                the private key and used for authentication.

            - Plain username/password auth, if a password was given

        If a private key requires a password to unlock it, and a password is
        passed in, that password will be used to attempt to unlock the key.

        :param str hostname: the server to connect to
        :param int port: the server port to connect to
        :param str username:
            the username to authenticate as (defaults to the current local
            username)
        :param str password:
            Used for password authentication; is also used for private key
            decryption if ``passphrase`` is not given.
        :param str passphrase:
            Used for decrypting private keys.
        :param .PKey pkey: an optional private key to use for authentication
        :param str key_filename:
            the filename, or list of filenames, of optional private key(s)
            and/or certs to try for authentication
        :param float timeout:
            an optional timeout (in seconds) for the TCP connect
        :param bool allow_agent:
            set to False to disable connecting to the SSH agent
        :param bool look_for_keys:
            set to False to disable searching for discoverable private key
            files in ``~/.ssh/``
        :param bool compress: set to True to turn on compression
        :param socket sock:
            an open socket or socket-like object (such as a `.Channel`) to use
            for communication to the target host
        :param bool gss_auth:
            ``True`` if you want to use GSS-API authentication
        :param bool gss_kex:
            Perform GSS-API Key Exchange and user authentication
        :param bool gss_deleg_creds: Delegate GSS-API client credentials or not
        :param str gss_host:
            The targets name in the kerberos database. default: hostname
        :param bool gss_trust_dns:
            Indicates whether or not the DNS is trusted to securely
            canonicalize the name of the host being connected to (default
            ``True``).
        :param float banner_timeout: an optional timeout (in seconds) to wait
            for the SSH banner to be presented.
        :param float auth_timeout: an optional timeout (in seconds) to wait for
            an authentication response.
        :param float channel_timeout: an optional timeout (in seconds) to wait
            for a channel open response.
        :param dict disabled_algorithms:
            an optional dict passed directly to `.Transport` and its keyword
            argument of the same name.
        :param transport_factory:
            an optional callable which is handed a subset of the constructor
            arguments (primarily those related to the socket, GSS
            functionality, and algorithm selection) and generates a
            `.Transport` instance to be used by this client. Defaults to
            `.Transport.__init__`.
        :param auth_strategy:
            an optional instance of `.AuthStrategy`, triggering use of this
            newer authentication mechanism instead of SSHClient's legacy auth
            method.

            .. warning::
                This parameter is **incompatible** with all other
                authentication-related parameters (such as, but not limited to,
                ``password``, ``key_filename`` and ``allow_agent``) and will
                trigger an exception if given alongside them.

        :returns:
            `.AuthResult` if ``auth_strategy`` is non-``None``; otherwise,
            returns ``None``.

        :raises BadHostKeyException:
            if the server's host key could not be verified.
        :raises AuthenticationException:
            if authentication failed.
        :raises UnableToAuthenticate:
            if authentication failed (when ``auth_strategy`` is non-``None``;
            and note that this is a subclass of ``AuthenticationException``).
        :raises socket.error:
            if a socket error (other than connection-refused or
            host-unreachable) occurred while connecting.
        :raises NoValidConnectionsError:
            if all valid connection targets for the requested hostname (eg IPv4
            and IPv6) yielded connection-refused or host-unreachable socket
            errors.
        :raises SSHException:
            if there was any other error connecting or establishing an SSH
            session.

        .. versionchanged:: 1.15
            Added the ``banner_timeout``, ``gss_auth``, ``gss_kex``,
            ``gss_deleg_creds`` and ``gss_host`` arguments.
        .. versionchanged:: 2.3
            Added the ``gss_trust_dns`` argument.
        .. versionchanged:: 2.4
            Added the ``passphrase`` argument.
        .. versionchanged:: 2.6
            Added the ``disabled_algorithms`` argument.
        .. versionchanged:: 2.12
            Added the ``transport_factory`` argument.
        .. versionchanged:: 3.2
            Added the ``auth_strategy`` argument.
        """
        # A caller-provided sock (e.g. a proxy Channel) skips our own TCP
        # connection logic entirely.
        if not sock:
            errors = {}
            # Try multiple possible address families (e.g. IPv4 vs IPv6)
            to_try = list(self._families_and_addresses(hostname, port))
            for af, addr in to_try:
                try:
                    sock = socket.socket(af, socket.SOCK_STREAM)
                    if timeout is not None:
                        try:
                            sock.settimeout(timeout)
                        # NOTE(review): bare except looks like deliberate
                        # best-effort settimeout; consider narrowing to
                        # OSError — confirm before changing.
                        except:
                            pass
                    sock.connect(addr)
                    # Break out of the loop on success
                    break
                except socket.error as e:
                    # As mentioned in socket docs it is better
                    # to close sockets explicitly
                    if sock:
                        sock.close()
                    # Raise anything that isn't a straight up connection error
                    # (such as a resolution error)
                    if e.errno not in (ECONNREFUSED, EHOSTUNREACH):
                        raise
                    # Capture anything else so we know how the run looks once
                    # iteration is complete. Retain info about which attempt
                    # this was.
                    errors[addr] = e

            # Make sure we explode usefully if no address family attempts
            # succeeded. We've no way of knowing which error is the "right"
            # one, so we construct a hybrid exception containing all the real
            # ones, of a subclass that client code should still be watching for
            # (socket.error)
            if len(errors) == len(to_try):
                raise NoValidConnectionsError(errors)

        if transport_factory is None:
            transport_factory = Transport
        t = self._transport = transport_factory(
            sock,
            gss_kex=gss_kex,
            gss_deleg_creds=gss_deleg_creds,
            disabled_algorithms=disabled_algorithms,
        )
        t.use_compression(compress=compress)
        t.set_gss_host(
            # t.hostname may be None, but GSS-API requires a target name.
            # Therefore use hostname as fallback.
            gss_host=gss_host or hostname,
            trust_dns=gss_trust_dns,
            gssapi_requested=gss_auth or gss_kex,
        )
        if self._log_channel is not None:
            t.set_log_channel(self._log_channel)
        if banner_timeout is not None:
            t.banner_timeout = banner_timeout
        if auth_timeout is not None:
            t.auth_timeout = auth_timeout
        if channel_timeout is not None:
            t.channel_timeout = channel_timeout

        # Non-default ports use OpenSSH's bracketed known_hosts name format.
        if port == SSH_PORT:
            server_hostkey_name = hostname
        else:
            server_hostkey_name = "[{}]:{}".format(hostname, port)
        our_server_keys = None

        # System keys take precedence over the locally-loaded ones.
        our_server_keys = self._system_host_keys.get(server_hostkey_name)
        if our_server_keys is None:
            our_server_keys = self._host_keys.get(server_hostkey_name)
        if our_server_keys is not None:
            # Prefer the key type we already have cached, so the server
            # offers a matching host key during kex.
            # NOTE(review): .keys()[0] relies on the HostKeys lookup
            # returning an object whose .keys() is indexable (not a plain
            # dict) — confirm against HostKeys.
            keytype = our_server_keys.keys()[0]
            sec_opts = t.get_security_options()
            other_types = [x for x in sec_opts.key_types if x != keytype]
            sec_opts.key_types = [keytype] + other_types

        t.start_client(timeout=timeout)

        # If GSS-API Key Exchange is performed we are not required to check the
        # host key, because the host is authenticated via GSS-API / SSPI as
        # well as our client.
        if not self._transport.gss_kex_used:
            server_key = t.get_remote_server_key()
            if our_server_keys is None:
                # will raise exception if the key is rejected
                self._policy.missing_host_key(
                    self, server_hostkey_name, server_key
                )
            else:
                our_key = our_server_keys.get(server_key.get_name())
                if our_key != server_key:
                    if our_key is None:
                        our_key = list(our_server_keys.values())[0]
                    raise BadHostKeyException(hostname, server_key, our_key)

        if username is None:
            username = getpass.getuser()

        # New auth flow!
        if auth_strategy is not None:
            return auth_strategy.authenticate(transport=t)

        # Old auth flow!
        # Normalize key_filename to a list of zero or more paths.
        if key_filename is None:
            key_filenames = []
        elif isinstance(key_filename, str):
            key_filenames = [key_filename]
        else:
            key_filenames = key_filename

        self._auth(
            username,
            password,
            pkey,
            key_filenames,
            allow_agent,
            look_for_keys,
            gss_auth,
            gss_kex,
            gss_deleg_creds,
            t.gss_host,
            passphrase,
        )
+
    def close(self):
        """
        Close this SSHClient and its underlying `.Transport`.

        This should be called anytime you are done using the client object.

        .. warning::
            Paramiko registers garbage collection hooks that will try to
            automatically close connections for you, but this is not presently
            reliable. Failure to explicitly close your client after use may
            lead to end-of-process hangs!
        """
        if self._transport is None:
            # Never connected (or already closed).  Note: the agent cleanup
            # below is deliberately skipped in this case.
            return
        self._transport.close()
        self._transport = None

        if self._agent is not None:
            self._agent.close()
            self._agent = None
+
+ def exec_command(
+ self,
+ command,
+ bufsize=-1,
+ timeout=None,
+ get_pty=False,
+ environment=None,
+ ):
+ """
+ Execute a command on the SSH server. A new `.Channel` is opened and
+ the requested command is executed. The command's input and output
+ streams are returned as Python ``file``-like objects representing
+ stdin, stdout, and stderr.
+
+ :param str command: the command to execute
+ :param int bufsize:
+ interpreted the same way as by the built-in ``file()`` function in
+ Python
+ :param int timeout:
+ set command's channel timeout. See `.Channel.settimeout`
+ :param bool get_pty:
+ Request a pseudo-terminal from the server (default ``False``).
+ See `.Channel.get_pty`
+ :param dict environment:
+ a dict of shell environment variables, to be merged into the
+ default environment that the remote command executes within.
+
+ .. warning::
+ Servers may silently reject some environment variables; see the
+ warning in `.Channel.set_environment_variable` for details.
+
+ :return:
+ the stdin, stdout, and stderr of the executing command, as a
+ 3-tuple
+
+ :raises: `.SSHException` -- if the server fails to execute the command
+
+ .. versionchanged:: 1.10
+ Added the ``get_pty`` kwarg.
+ """
+ chan = self._transport.open_session(timeout=timeout)
+ if get_pty:
+ chan.get_pty()
+ chan.settimeout(timeout)
+ if environment:
+ chan.update_environment(environment)
+ chan.exec_command(command)
+ stdin = chan.makefile_stdin("wb", bufsize)
+ stdout = chan.makefile("r", bufsize)
+ stderr = chan.makefile_stderr("r", bufsize)
+ return stdin, stdout, stderr
+
+ def invoke_shell(
+ self,
+ term="vt100",
+ width=80,
+ height=24,
+ width_pixels=0,
+ height_pixels=0,
+ environment=None,
+ ):
+ """
+ Start an interactive shell session on the SSH server. A new `.Channel`
+ is opened and connected to a pseudo-terminal using the requested
+ terminal type and size.
+
+ :param str term:
+ the terminal type to emulate (for example, ``"vt100"``)
+ :param int width: the width (in characters) of the terminal window
+ :param int height: the height (in characters) of the terminal window
+ :param int width_pixels: the width (in pixels) of the terminal window
+ :param int height_pixels: the height (in pixels) of the terminal window
+ :param dict environment: the command's environment
+ :return: a new `.Channel` connected to the remote shell
+
+ :raises: `.SSHException` -- if the server fails to invoke a shell
+ """
+ chan = self._transport.open_session()
+ chan.get_pty(term, width, height, width_pixels, height_pixels)
+ chan.invoke_shell()
+ return chan
+
+ def open_sftp(self):
+ """
+ Open an SFTP session on the SSH server.
+
+ :return: a new `.SFTPClient` session object
+ """
+ return self._transport.open_sftp_client()
+
+ def get_transport(self):
+ """
+ Return the underlying `.Transport` object for this SSH connection.
+ This can be used to perform lower-level tasks, like opening specific
+ kinds of channels.
+
+ :return: the `.Transport` for this connection
+ """
+ return self._transport
+
+ def _key_from_filepath(self, filename, klass, password):
+ """
+ Attempt to derive a `.PKey` from given string path ``filename``:
+
+ - If ``filename`` appears to be a cert, the matching private key is
+ loaded.
+ - Otherwise, the filename is assumed to be a private key, and the
+ matching public cert will be loaded if it exists.
+ """
+ cert_suffix = "-cert.pub"
+ # Assume privkey, not cert, by default
+ if filename.endswith(cert_suffix):
+ key_path = filename[: -len(cert_suffix)]
+ cert_path = filename
+ else:
+ key_path = filename
+ cert_path = filename + cert_suffix
+ # Blindly try the key path; if no private key, nothing will work.
+ key = klass.from_private_key_file(key_path, password)
+ # TODO: change this to 'Loading' instead of 'Trying' sometime; probably
+ # when #387 is released, since this is a critical log message users are
+ # likely testing/filtering for (bah.)
+ msg = "Trying discovered key {} in {}".format(
+ hexlify(key.get_fingerprint()), key_path
+ )
+ self._log(DEBUG, msg)
+ # Attempt to load cert if it exists.
+ if os.path.isfile(cert_path):
+ key.load_certificate(cert_path)
+ self._log(DEBUG, "Adding public certificate {}".format(cert_path))
+ return key
+
def _auth(
    self,
    username,
    password,
    pkey,
    key_filenames,
    allow_agent,
    look_for_keys,
    gss_auth,
    gss_kex,
    gss_deleg_creds,
    gss_host,
    passphrase,
):
    """
    Try, in order:

    - The key(s) passed in, if one was passed in.
    - Any key we can find through an SSH agent (if allowed).
    - Any id_* key discoverable in ~/.ssh/ (if allowed).
    - Plain username/password auth, if a password was given.

    (The password might be needed to unlock a private key [if 'passphrase'
    isn't also given], or for two-factor authentication [for which it is
    required].)

    On total failure, re-raises the last auth-related exception seen, or
    raises `SSHException` if no method was even attempted.
    """
    saved_exception = None
    two_factor = False
    allowed_types = set()
    # Methods the server may demand as a second factor after a key is
    # accepted; if auth_publickey returns one of these we keep going
    # instead of treating the auth as complete.
    two_factor_types = {"keyboard-interactive", "password"}
    # Fall back to using the password as the key passphrase when no
    # explicit passphrase was supplied.
    if passphrase is None and password is not None:
        passphrase = password

    # If GSS-API support and GSS-API Key Exchange was performed, we attempt
    # authentication with gssapi-keyex.
    if gss_kex and self._transport.gss_kex_used:
        try:
            self._transport.auth_gssapi_keyex(username)
            return
        except Exception as e:
            saved_exception = e

    # Try GSS-API authentication (gssapi-with-mic) only if GSS-API Key
    # Exchange is not performed, because if we use GSS-API for the key
    # exchange, there is already a fully established GSS-API context, so
    # why should we do that again?
    if gss_auth:
        try:
            return self._transport.auth_gssapi_with_mic(
                username, gss_host, gss_deleg_creds
            )
        except Exception as e:
            saved_exception = e

    # Explicitly-passed-in key object takes priority over everything else.
    if pkey is not None:
        try:
            self._log(
                DEBUG,
                "Trying SSH key {}".format(
                    hexlify(pkey.get_fingerprint())
                ),
            )
            allowed_types = set(
                self._transport.auth_publickey(username, pkey)
            )
            two_factor = allowed_types & two_factor_types
            if not two_factor:
                return
        except SSHException as e:
            saved_exception = e

    # Explicitly-passed-in key file paths; each path is tried against
    # every supported key class until one loads & authenticates.
    if not two_factor:
        for key_filename in key_filenames:
            # TODO 4.0: leverage PKey.from_path() if we don't end up just
            # killing SSHClient entirely
            for pkey_class in (RSAKey, ECDSAKey, Ed25519Key):
                try:
                    key = self._key_from_filepath(
                        key_filename, pkey_class, passphrase
                    )
                    allowed_types = set(
                        self._transport.auth_publickey(username, key)
                    )
                    two_factor = allowed_types & two_factor_types
                    if not two_factor:
                        return
                    # First factor accepted; stop trying other key
                    # classes for this filename.
                    break
                except SSHException as e:
                    saved_exception = e

    # Keys held by a running SSH agent, if permitted.
    if not two_factor and allow_agent:
        if self._agent is None:
            self._agent = Agent()

        for key in self._agent.get_keys():
            try:
                id_ = hexlify(key.get_fingerprint())
                self._log(DEBUG, "Trying SSH agent key {}".format(id_))
                # for 2-factor auth a successfully auth'd key password
                # will return an allowed 2fac auth method
                allowed_types = set(
                    self._transport.auth_publickey(username, key)
                )
                two_factor = allowed_types & two_factor_types
                if not two_factor:
                    return
                break
            except SSHException as e:
                saved_exception = e

    # Default on-disk key locations (~/.ssh/id_* and cert siblings),
    # if permitted via look_for_keys.
    if not two_factor:
        keyfiles = []

        for keytype, name in [
            (RSAKey, "rsa"),
            (ECDSAKey, "ecdsa"),
            (Ed25519Key, "ed25519"),
        ]:
            # ~/ssh/ is for windows
            for directory in [".ssh", "ssh"]:
                full_path = os.path.expanduser(
                    "~/{}/id_{}".format(directory, name)
                )
                if os.path.isfile(full_path):
                    # TODO: only do this append if below did not run
                    keyfiles.append((keytype, full_path))
                if os.path.isfile(full_path + "-cert.pub"):
                    keyfiles.append((keytype, full_path + "-cert.pub"))

        if not look_for_keys:
            keyfiles = []

        for pkey_class, filename in keyfiles:
            try:
                key = self._key_from_filepath(
                    filename, pkey_class, passphrase
                )
                # for 2-factor auth a successfully auth'd key will result
                # in ['password']
                allowed_types = set(
                    self._transport.auth_publickey(username, key)
                )
                two_factor = allowed_types & two_factor_types
                if not two_factor:
                    return
                break
            except (SSHException, IOError) as e:
                saved_exception = e

    # Password auth proper, or the interactive second factor if a key
    # already passed as the first factor.
    if password is not None:
        try:
            self._transport.auth_password(username, password)
            return
        except SSHException as e:
            saved_exception = e
    elif two_factor:
        try:
            self._transport.auth_interactive_dumb(username)
            return
        except SSHException as e:
            saved_exception = e

    # if we got an auth-failed exception earlier, re-raise it
    if saved_exception is not None:
        raise saved_exception
    raise SSHException("No authentication methods available")
+
def _log(self, level, msg):
    """Forward a log record to the underlying transport's logger."""
    transport = self._transport
    transport._log(level, msg)
+
+
class MissingHostKeyPolicy:
    """
    Interface for defining the policy that `.SSHClient` should use when the
    SSH server's hostname is not in either the system host keys or the
    application's keys. Pre-made classes implement policies for automatically
    adding the key to the application's `.HostKeys` object (`.AutoAddPolicy`),
    and for automatically rejecting the key (`.RejectPolicy`).

    This function may be used to ask the user to verify the key, for example.
    """

    def missing_host_key(self, client, hostname, key):
        """
        Called when an `.SSHClient` receives a server key for a server that
        isn't in either the system or local `.HostKeys` object. To accept
        the key, simply return. To reject, raise an exception (which will
        be passed to the calling application).

        :param client: the `.SSHClient` instance performing the connection
        :param str hostname: the hostname of the server being connected to
        :param key: the server's presented host key
        """
        pass
+
+
class AutoAddPolicy(MissingHostKeyPolicy):
    """
    Policy for automatically adding the hostname and new host key to the
    local `.HostKeys` object, and saving it. This is used by `.SSHClient`.
    """

    def missing_host_key(self, client, hostname, key):
        # Record the new key, persist it when a save path is known,
        # then log what happened at DEBUG level.
        key_type = key.get_name()
        client._host_keys.add(hostname, key_type, key)
        if client._host_keys_filename is not None:
            client.save_host_keys(client._host_keys_filename)
        message = "Adding {} host key for {}: {}".format(
            key_type, hostname, hexlify(key.get_fingerprint())
        )
        client._log(DEBUG, message)
+
+
class RejectPolicy(MissingHostKeyPolicy):
    """
    Policy for automatically rejecting the unknown hostname & key. This is
    used by `.SSHClient`.
    """

    def missing_host_key(self, client, hostname, key):
        # Log the rejection first, then abort the connection attempt.
        fingerprint = hexlify(key.get_fingerprint())
        client._log(
            DEBUG,
            "Rejecting {} host key for {}: {}".format(
                key.get_name(), hostname, fingerprint
            ),
        )
        raise SSHException(
            "Server {!r} not found in known_hosts".format(hostname)
        )
+
+
class WarningPolicy(MissingHostKeyPolicy):
    """
    Policy for logging a Python-style warning for an unknown host key, but
    accepting it. This is used by `.SSHClient`.
    """

    def missing_host_key(self, client, hostname, key):
        # Accept the key (no exception), but surface a warning to the user.
        text = "Unknown {} host key for {}: {}".format(
            key.get_name(), hostname, hexlify(key.get_fingerprint())
        )
        warnings.warn(text)
diff --git a/lib/paramiko/common.py b/lib/paramiko/common.py
new file mode 100644
index 0000000..b57149b
--- /dev/null
+++ b/lib/paramiko/common.py
@@ -0,0 +1,245 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Common constants and global variables.
+"""
+import logging
+import struct
+
+#
+# Formerly of py3compat.py. May be fully delete'able with a deeper look?
+#
+
+
def byte_chr(c):
    """Return a length-1 ``bytes`` whose ordinal value is the int *c* (0-255)."""
    assert isinstance(c, int)
    packed = struct.pack("B", c)
    return packed
+
+
def byte_mask(c, mask):
    """Return a length-1 ``bytes`` holding *c* bitwise-ANDed with *mask*."""
    assert isinstance(c, int)
    masked = c & mask
    return struct.pack("B", masked)
+
+
def byte_ord(c):
    """Return the ordinal of *c*, accepting either an int or a 1-char str/bytes."""
    # ints pass straight through; anything else goes via ord().
    if isinstance(c, int):
        return c
    return ord(c)
+
+
# SSH message type codes used on the wire. Transport-layer messages first.
(
    MSG_DISCONNECT,
    MSG_IGNORE,
    MSG_UNIMPLEMENTED,
    MSG_DEBUG,
    MSG_SERVICE_REQUEST,
    MSG_SERVICE_ACCEPT,
    MSG_EXT_INFO,
) = range(1, 8)
(MSG_KEXINIT, MSG_NEWKEYS) = range(20, 22)
(
    MSG_USERAUTH_REQUEST,
    MSG_USERAUTH_FAILURE,
    MSG_USERAUTH_SUCCESS,
    MSG_USERAUTH_BANNER,
) = range(50, 54)
# NOTE: codes 60-61 are deliberately assigned multiple names below; their
# meaning depends on which userauth method is currently in progress
# (publickey vs keyboard-interactive vs GSS-API).
MSG_USERAUTH_PK_OK = 60
(MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE) = range(60, 62)
(MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN) = range(60, 62)
(
    MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE,
    MSG_USERAUTH_GSSAPI_ERROR,
    MSG_USERAUTH_GSSAPI_ERRTOK,
    MSG_USERAUTH_GSSAPI_MIC,
) = range(63, 67)
HIGHEST_USERAUTH_MESSAGE_ID = 79
(MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE) = range(80, 83)
(
    MSG_CHANNEL_OPEN,
    MSG_CHANNEL_OPEN_SUCCESS,
    MSG_CHANNEL_OPEN_FAILURE,
    MSG_CHANNEL_WINDOW_ADJUST,
    MSG_CHANNEL_DATA,
    MSG_CHANNEL_EXTENDED_DATA,
    MSG_CHANNEL_EOF,
    MSG_CHANNEL_CLOSE,
    MSG_CHANNEL_REQUEST,
    MSG_CHANNEL_SUCCESS,
    MSG_CHANNEL_FAILURE,
) = range(90, 101)
+
# Pre-packed single-byte versions of the message codes above, ready to be
# written directly into outgoing packets.
cMSG_DISCONNECT = byte_chr(MSG_DISCONNECT)
cMSG_IGNORE = byte_chr(MSG_IGNORE)
cMSG_UNIMPLEMENTED = byte_chr(MSG_UNIMPLEMENTED)
cMSG_DEBUG = byte_chr(MSG_DEBUG)
cMSG_SERVICE_REQUEST = byte_chr(MSG_SERVICE_REQUEST)
cMSG_SERVICE_ACCEPT = byte_chr(MSG_SERVICE_ACCEPT)
cMSG_EXT_INFO = byte_chr(MSG_EXT_INFO)
cMSG_KEXINIT = byte_chr(MSG_KEXINIT)
cMSG_NEWKEYS = byte_chr(MSG_NEWKEYS)
cMSG_USERAUTH_REQUEST = byte_chr(MSG_USERAUTH_REQUEST)
cMSG_USERAUTH_FAILURE = byte_chr(MSG_USERAUTH_FAILURE)
cMSG_USERAUTH_SUCCESS = byte_chr(MSG_USERAUTH_SUCCESS)
cMSG_USERAUTH_BANNER = byte_chr(MSG_USERAUTH_BANNER)
cMSG_USERAUTH_PK_OK = byte_chr(MSG_USERAUTH_PK_OK)
cMSG_USERAUTH_INFO_REQUEST = byte_chr(MSG_USERAUTH_INFO_REQUEST)
cMSG_USERAUTH_INFO_RESPONSE = byte_chr(MSG_USERAUTH_INFO_RESPONSE)
cMSG_USERAUTH_GSSAPI_RESPONSE = byte_chr(MSG_USERAUTH_GSSAPI_RESPONSE)
cMSG_USERAUTH_GSSAPI_TOKEN = byte_chr(MSG_USERAUTH_GSSAPI_TOKEN)
cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = byte_chr(
    MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE
)
cMSG_USERAUTH_GSSAPI_ERROR = byte_chr(MSG_USERAUTH_GSSAPI_ERROR)
cMSG_USERAUTH_GSSAPI_ERRTOK = byte_chr(MSG_USERAUTH_GSSAPI_ERRTOK)
cMSG_USERAUTH_GSSAPI_MIC = byte_chr(MSG_USERAUTH_GSSAPI_MIC)
cMSG_GLOBAL_REQUEST = byte_chr(MSG_GLOBAL_REQUEST)
cMSG_REQUEST_SUCCESS = byte_chr(MSG_REQUEST_SUCCESS)
cMSG_REQUEST_FAILURE = byte_chr(MSG_REQUEST_FAILURE)
cMSG_CHANNEL_OPEN = byte_chr(MSG_CHANNEL_OPEN)
cMSG_CHANNEL_OPEN_SUCCESS = byte_chr(MSG_CHANNEL_OPEN_SUCCESS)
cMSG_CHANNEL_OPEN_FAILURE = byte_chr(MSG_CHANNEL_OPEN_FAILURE)
cMSG_CHANNEL_WINDOW_ADJUST = byte_chr(MSG_CHANNEL_WINDOW_ADJUST)
cMSG_CHANNEL_DATA = byte_chr(MSG_CHANNEL_DATA)
cMSG_CHANNEL_EXTENDED_DATA = byte_chr(MSG_CHANNEL_EXTENDED_DATA)
cMSG_CHANNEL_EOF = byte_chr(MSG_CHANNEL_EOF)
cMSG_CHANNEL_CLOSE = byte_chr(MSG_CHANNEL_CLOSE)
cMSG_CHANNEL_REQUEST = byte_chr(MSG_CHANNEL_REQUEST)
cMSG_CHANNEL_SUCCESS = byte_chr(MSG_CHANNEL_SUCCESS)
cMSG_CHANNEL_FAILURE = byte_chr(MSG_CHANNEL_FAILURE)
+
# for debugging:
# Human-readable names for message codes, used only in debug/log output.
# (Fixed typo: "userauth--banner" -> "userauth-banner".)
MSG_NAMES = {
    MSG_DISCONNECT: "disconnect",
    MSG_IGNORE: "ignore",
    MSG_UNIMPLEMENTED: "unimplemented",
    MSG_DEBUG: "debug",
    MSG_SERVICE_REQUEST: "service-request",
    MSG_SERVICE_ACCEPT: "service-accept",
    MSG_KEXINIT: "kexinit",
    MSG_EXT_INFO: "ext-info",
    MSG_NEWKEYS: "newkeys",
    # Codes 30-34 and 40-41 are kex-method-specific; named generically here.
    30: "kex30",
    31: "kex31",
    32: "kex32",
    33: "kex33",
    34: "kex34",
    40: "kex40",
    41: "kex41",
    MSG_USERAUTH_REQUEST: "userauth-request",
    MSG_USERAUTH_FAILURE: "userauth-failure",
    MSG_USERAUTH_SUCCESS: "userauth-success",
    MSG_USERAUTH_BANNER: "userauth-banner",
    # Code 60 is overloaded; see the MSG_* definitions above.
    MSG_USERAUTH_PK_OK: "userauth-60(pk-ok/info-request)",
    MSG_USERAUTH_INFO_RESPONSE: "userauth-info-response",
    MSG_GLOBAL_REQUEST: "global-request",
    MSG_REQUEST_SUCCESS: "request-success",
    MSG_REQUEST_FAILURE: "request-failure",
    MSG_CHANNEL_OPEN: "channel-open",
    MSG_CHANNEL_OPEN_SUCCESS: "channel-open-success",
    MSG_CHANNEL_OPEN_FAILURE: "channel-open-failure",
    MSG_CHANNEL_WINDOW_ADJUST: "channel-window-adjust",
    MSG_CHANNEL_DATA: "channel-data",
    MSG_CHANNEL_EXTENDED_DATA: "channel-extended-data",
    MSG_CHANNEL_EOF: "channel-eof",
    MSG_CHANNEL_CLOSE: "channel-close",
    MSG_CHANNEL_REQUEST: "channel-request",
    MSG_CHANNEL_SUCCESS: "channel-success",
    MSG_CHANNEL_FAILURE: "channel-failure",
    MSG_USERAUTH_GSSAPI_RESPONSE: "userauth-gssapi-response",
    MSG_USERAUTH_GSSAPI_TOKEN: "userauth-gssapi-token",
    MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE: "userauth-gssapi-exchange-complete",
    MSG_USERAUTH_GSSAPI_ERROR: "userauth-gssapi-error",
    MSG_USERAUTH_GSSAPI_ERRTOK: "userauth-gssapi-error-token",
    MSG_USERAUTH_GSSAPI_MIC: "userauth-gssapi-mic",
}
+
+
# authentication request return codes:
AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED = range(3)


# channel request failed reasons:
(
    OPEN_SUCCEEDED,
    OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
    OPEN_FAILED_CONNECT_FAILED,
    OPEN_FAILED_UNKNOWN_CHANNEL_TYPE,
    OPEN_FAILED_RESOURCE_SHORTAGE,
) = range(0, 5)


# Human-readable messages for the nonzero OPEN_FAILED_* codes above.
CONNECTION_FAILED_CODE = {
    1: "Administratively prohibited",
    2: "Connect failed",
    3: "Unknown channel type",
    4: "Resource shortage",
}


# Disconnect reason codes (only the ones referenced by this library).
(
    DISCONNECT_SERVICE_NOT_AVAILABLE,
    DISCONNECT_AUTH_CANCELLED_BY_USER,
    DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
) = (7, 13, 14)
+
# Frequently used single-byte values, precomputed once.
zero_byte = byte_chr(0)
one_byte = byte_chr(1)
four_byte = byte_chr(4)
max_byte = byte_chr(0xFF)
cr_byte = byte_chr(13)
linefeed_byte = byte_chr(10)
crlf = cr_byte + linefeed_byte
cr_byte_value = 13
linefeed_byte_value = 10


xffffffff = 0xFFFFFFFF
x80000000 = 0x80000000
# Unix permission bit patterns, spelled as the decimal equivalents of the
# usual octal forms (e.g. 438 == 0o666, 511 == 0o777).
o666 = 438
o660 = 432
o644 = 420
o600 = 384
o777 = 511
o700 = 448
o70 = 56

# Logging levels re-exported for convenience within the package.
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL

# Common IO/select/etc sleep period, in seconds
io_sleep = 0.01

# Default channel flow-control window and packet sizes.
DEFAULT_WINDOW_SIZE = 64 * 2**15
DEFAULT_MAX_PACKET_SIZE = 2**15

# lower bound on the max packet size we'll accept from the remote host
# Minimum packet size is 32768 bytes according to
# http://www.ietf.org/rfc/rfc4254.txt
MIN_WINDOW_SIZE = 2**15

# However, according to http://www.ietf.org/rfc/rfc4253.txt it is perfectly
# legal to accept a size much smaller, as OpenSSH client does as size 16384.
MIN_PACKET_SIZE = 2**12

# Max windows size according to http://www.ietf.org/rfc/rfc4254.txt
MAX_WINDOW_SIZE = 2**32 - 1
diff --git a/lib/paramiko/compress.py b/lib/paramiko/compress.py
new file mode 100644
index 0000000..18ff484
--- /dev/null
+++ b/lib/paramiko/compress.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Compression implementations for a Transport.
+"""
+
+import zlib
+
+
class ZlibCompressor:
    """Callable, stateful zlib compressor producing self-contained chunks."""

    def __init__(self):
        # Use the default level of zlib compression
        self.z = zlib.compressobj()

    def __call__(self, data):
        # Full-flush after every chunk so the peer can decompress each
        # chunk independently of data still buffered here.
        compressed = self.z.compress(data)
        return compressed + self.z.flush(zlib.Z_FULL_FLUSH)
+
+
class ZlibDecompressor:
    """Callable, stateful zlib decompressor (inverse of `ZlibCompressor`)."""

    def __init__(self):
        self.z = zlib.decompressobj()

    def __call__(self, data):
        inflated = self.z.decompress(data)
        return inflated
diff --git a/lib/paramiko/config.py b/lib/paramiko/config.py
new file mode 100644
index 0000000..8ab55c6
--- /dev/null
+++ b/lib/paramiko/config.py
@@ -0,0 +1,696 @@
+# Copyright (C) 2006-2007 Robey Pointer
+# Copyright (C) 2012 Olle Lundberg
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Configuration file (aka ``ssh_config``) support.
+"""
+
+import fnmatch
+import getpass
+import os
+import re
+import shlex
+import socket
+from hashlib import sha1
+from io import StringIO
+from functools import partial
+
# Invoke is an optional dependency (needed only to evaluate 'Match exec'
# clauses); remember the ImportError so it can be raised lazily at the
# actual point of use instead of at import time.
invoke, invoke_import_error = None, None
try:
    import invoke
except ImportError as e:
    invoke_import_error = e

from .ssh_exception import CouldNotCanonicalize, ConfigParseError


# Default SSH port, used when a config stanza does not specify one.
SSH_PORT = 22
+
+
class SSHConfig:
    """
    Representation of config information as stored in the format used by
    OpenSSH. Queries can be made via `lookup`. The format is described in
    OpenSSH's ``ssh_config`` man page. This class is provided primarily as a
    convenience to posix users (since the OpenSSH format is a de-facto
    standard on posix) but should work fine on Windows too.

    .. versionadded:: 1.6
    """

    # Matches "Key Value" or "Key=Value" config lines.
    SETTINGS_REGEX = re.compile(r"(\w+)(?:\s*=\s*|\s+)(.+)")

    # TODO: do a full scan of ssh.c & friends to make sure we're fully
    # compatible across the board, e.g. OpenSSH 8.1 added %n to ProxyCommand.
    TOKENS_BY_CONFIG_KEY = {
        "controlpath": ["%C", "%h", "%l", "%L", "%n", "%p", "%r", "%u"],
        "hostname": ["%h"],
        "identityfile": ["%C", "~", "%d", "%h", "%l", "%u", "%r"],
        "proxycommand": ["~", "%h", "%p", "%r"],
        "proxyjump": ["%h", "%p", "%r"],
        # Doesn't seem worth making this 'special' for now, it will fit well
        # enough (no actual match-exec config key to be confused with).
        "match-exec": ["%C", "%d", "%h", "%L", "%l", "%n", "%p", "%r", "%u"],
    }

    def __init__(self):
        """
        Create a new OpenSSH config object.

        Note: the newer alternate constructors `from_path`, `from_file` and
        `from_text` are simpler to use, as they parse on instantiation. For
        example, instead of::

            config = SSHConfig()
            config.parse(open("some-path.config"))

        you could::

            config = SSHConfig.from_file(open("some-path.config"))
            # Or more directly:
            config = SSHConfig.from_path("some-path.config")
            # Or if you have arbitrary ssh_config text from some other source:
            config = SSHConfig.from_text("Host foo\\n\\tUser bar")
        """
        self._config = []

    @classmethod
    def from_text(cls, text):
        """
        Create a new, parsed `SSHConfig` from ``text`` string.

        .. versionadded:: 2.7
        """
        return cls.from_file(StringIO(text))

    @classmethod
    def from_path(cls, path):
        """
        Create a new, parsed `SSHConfig` from the file found at ``path``.

        .. versionadded:: 2.7
        """
        with open(path) as flo:
            return cls.from_file(flo)

    @classmethod
    def from_file(cls, flo):
        """
        Create a new, parsed `SSHConfig` from file-like object ``flo``.

        .. versionadded:: 2.7
        """
        obj = cls()
        obj.parse(flo)
        return obj

    def parse(self, file_obj):
        """
        Read an OpenSSH config from the given file object.

        :param file_obj: a file-like object to read the config file from

        :raises ConfigParseError: if a line cannot be parsed
        """
        # Start out w/ implicit/anonymous global host-like block to hold
        # anything not contained by an explicit one.
        context = {"host": ["*"], "config": {}}
        for line in file_obj:
            # Strip any leading or trailing whitespace from the line.
            # Refer to https://github.com/paramiko/paramiko/issues/499
            line = line.strip()
            # Skip blanks, comments
            if not line or line.startswith("#"):
                continue

            # Parse line into key, value
            match = re.match(self.SETTINGS_REGEX, line)
            if not match:
                raise ConfigParseError("Unparsable line {}".format(line))
            key = match.group(1).lower()
            value = match.group(2)

            # Host keyword triggers switch to new block/context
            if key in ("host", "match"):
                self._config.append(context)
                context = {"config": {}}
                if key == "host":
                    # TODO 4.0: make these real objects or at least name this
                    # "hosts" to acknowledge it's an iterable. (Doing so prior
                    # to 3.0, despite it being a private API, feels bad -
                    # surely such an old codebase has folks actually relying on
                    # these keys.)
                    context["host"] = self._get_hosts(value)
                else:
                    context["matches"] = self._get_matches(value)
            # Special-case for noop ProxyCommands
            elif key == "proxycommand" and value.lower() == "none":
                # Store 'none' as None - not as a string implying that the
                # proxycommand is the literal shell command "none"!
                context["config"][key] = None
            # All other keywords get stored, directly or via append
            else:
                # Strip one level of surrounding double quotes, if present.
                if value.startswith('"') and value.endswith('"'):
                    value = value[1:-1]

                # identityfile, localforward, remoteforward keys are special
                # cases, since they are allowed to be specified multiple times
                # and they should be tried in order of specification.
                if key in ["identityfile", "localforward", "remoteforward"]:
                    if key in context["config"]:
                        context["config"][key].append(value)
                    else:
                        context["config"][key] = [value]
                elif key not in context["config"]:
                    context["config"][key] = value
        # Store last 'open' block and we're done
        self._config.append(context)

    def lookup(self, hostname):
        """
        Return a dict (`SSHConfigDict`) of config options for a given hostname.

        The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
        For each parameter, the first obtained value will be used. The
        configuration files contain sections separated by ``Host`` and/or
        ``Match`` specifications, and that section is only applied for hosts
        which match the given patterns or keywords

        Since the first obtained value for each parameter is used, more host-
        specific declarations should be given near the beginning of the file,
        and general defaults at the end.

        The keys in the returned dict are all normalized to lowercase (look
        for ``"port"``, not ``"Port"``). The values are processed according to
        the rules for substitution variable expansion in ``ssh_config``.

        Finally, please see the docs for `SSHConfigDict` for deeper info on
        features such as optional type conversion methods, e.g.::

            conf = my_config.lookup('myhost')
            assert conf['passwordauthentication'] == 'yes'
            assert conf.as_bool('passwordauthentication') is True

        .. note::
            If there is no explicitly configured ``HostName`` value, it will be
            set to the being-looked-up hostname, which is as close as we can
            get to OpenSSH's behavior around that particular option.

        :param str hostname: the hostname to lookup

        .. versionchanged:: 2.5
            Returns `SSHConfigDict` objects instead of dict literals.
        .. versionchanged:: 2.7
            Added canonicalization support.
        .. versionchanged:: 2.7
            Added ``Match`` support.
        .. versionchanged:: 3.3
            Added ``Match final`` support.
        """
        # First pass
        options = self._lookup(hostname=hostname)
        # Inject HostName if it was not set (this used to be done incidentally
        # during tokenization, for some reason).
        if "hostname" not in options:
            options["hostname"] = hostname
        # Handle canonicalization
        canon = options.get("canonicalizehostname", None) in ("yes", "always")
        maxdots = int(options.get("canonicalizemaxdots", 1))
        if canon and hostname.count(".") <= maxdots:
            # NOTE: OpenSSH manpage does not explicitly state this, but its
            # implementation for CanonicalDomains is 'split on any whitespace'.
            domains = options["canonicaldomains"].split()
            hostname = self.canonicalize(hostname, options, domains)
            # Overwrite HostName again here (this is also what OpenSSH does)
            options["hostname"] = hostname
            options = self._lookup(
                hostname, options, canonical=True, final=True
            )
        else:
            options = self._lookup(
                hostname, options, canonical=False, final=True
            )
        return options

    def _lookup(self, hostname, options=None, canonical=False, final=False):
        # Perform a single pass over all stanzas; 'canonical'/'final' flags
        # feed Match evaluation, and 'final' triggers token expansion.
        # Init
        if options is None:
            options = SSHConfigDict()
        # Iterate all stanzas, applying any that match, in turn (so that things
        # like Match can reference currently understood state)
        for context in self._config:
            if not (
                self._pattern_matches(context.get("host", []), hostname)
                or self._does_match(
                    context.get("matches", []),
                    hostname,
                    canonical,
                    final,
                    options,
                )
            ):
                continue
            for key, value in context["config"].items():
                if key not in options:
                    # Create a copy of the original value,
                    # else it will reference the original list
                    # in self._config and update that value too
                    # when the extend() is being called.
                    options[key] = value[:] if value is not None else value
                elif key == "identityfile":
                    options[key].extend(
                        x for x in value if x not in options[key]
                    )
        if final:
            # Expand variables in resulting values
            # (besides 'Match exec' which was already handled above)
            options = self._expand_variables(options, hostname)
        return options

    def canonicalize(self, hostname, options, domains):
        """
        Return canonicalized version of ``hostname``.

        :param str hostname: Target hostname.
        :param options: An `SSHConfigDict` from a previous lookup pass.
        :param domains: List of domains (e.g. ``["paramiko.org"]``).

        :returns: A canonicalized hostname if one was found, else ``None``.

        .. versionadded:: 2.7
        """
        found = False
        for domain in domains:
            candidate = "{}.{}".format(hostname, domain)
            family_specific = _addressfamily_host_lookup(candidate, options)
            if family_specific is not None:
                # TODO: would we want to dig deeper into other results? e.g. to
                # find something that satisfies PermittedCNAMEs when that is
                # implemented?
                found = family_specific[0]
            else:
                # TODO: what does ssh use here and is there a reason to use
                # that instead of gethostbyname?
                try:
                    found = socket.gethostbyname(candidate)
                except socket.gaierror:
                    pass
            if found:
                # TODO: follow CNAME (implied by found != candidate?) if
                # CanonicalizePermittedCNAMEs allows it
                return candidate
        # If we got here, it means canonicalization failed.
        # When CanonicalizeFallbackLocal is undefined or 'yes', we just spit
        # back the original hostname.
        if options.get("canonicalizefallbacklocal", "yes") == "yes":
            return hostname
        # And here, we failed AND fallback was set to a non-yes value, so we
        # need to get mad.
        raise CouldNotCanonicalize(hostname)

    def get_hostnames(self):
        """
        Return the set of literal hostnames defined in the SSH config (both
        explicit hostnames and wildcard entries).
        """
        hosts = set()
        for entry in self._config:
            hosts.update(entry["host"])
        return hosts

    def _pattern_matches(self, patterns, target):
        # Return True when target matches at least one (non-negated) glob
        # pattern; a matching "!"-prefixed pattern vetoes everything.
        # Convenience auto-splitter if not already a list
        if hasattr(patterns, "split"):
            patterns = patterns.split(",")
        match = False
        for pattern in patterns:
            # Short-circuit if target matches a negated pattern
            if pattern.startswith("!") and fnmatch.fnmatch(
                target, pattern[1:]
            ):
                return False
            # Flag a match, but continue (in case of later negation) if regular
            # match occurs
            elif fnmatch.fnmatch(target, pattern):
                match = True
        return match

    def _does_match(
        self, match_list, target_hostname, canonical, final, options
    ):
        # Evaluate a parsed Match clause list (from _get_matches) against the
        # current lookup state; returns a truthy list when all criteria pass.
        matched = []
        candidates = match_list[:]
        local_username = getpass.getuser()
        while candidates:
            candidate = candidates.pop(0)
            passed = None
            # Obtain latest host/user value every loop, so later Match may
            # reference values assigned within a prior Match.
            configured_host = options.get("hostname", None)
            configured_user = options.get("user", None)
            type_, param = candidate["type"], candidate["param"]
            # Canonical is a hard pass/fail based on whether this is a
            # canonicalized re-lookup.
            if type_ == "canonical":
                if self._should_fail(canonical, candidate):
                    return False
            if type_ == "final":
                passed = final
            # The parse step ensures we only see this by itself or after
            # canonical, so it's also an easy hard pass. (No negation here as
            # that would be uh, pretty weird?)
            elif type_ == "all":
                return True
            # From here, we are testing various non-hard criteria,
            # short-circuiting only on fail
            elif type_ == "host":
                hostval = configured_host or target_hostname
                passed = self._pattern_matches(param, hostval)
            elif type_ == "originalhost":
                passed = self._pattern_matches(param, target_hostname)
            elif type_ == "user":
                user = configured_user or local_username
                passed = self._pattern_matches(param, user)
            elif type_ == "localuser":
                passed = self._pattern_matches(param, local_username)
            elif type_ == "exec":
                exec_cmd = self._tokenize(
                    options, target_hostname, "match-exec", param
                )
                # This is the laziest spot in which we can get mad about an
                # inability to import Invoke.
                if invoke is None:
                    raise invoke_import_error
                # Like OpenSSH, we 'redirect' stdout but let stderr bubble up
                passed = invoke.run(exec_cmd, hide="stdout", warn=True).ok
            # Tackle any 'passed, but was negated' results from above
            if passed is not None and self._should_fail(passed, candidate):
                return False
            # Made it all the way here? Everything matched!
            matched.append(candidate)
        # Did anything match? (To be treated as bool, usually.)
        return matched

    def _should_fail(self, would_pass, candidate):
        # Apply the clause's "!" negation flag to a raw pass/fail result.
        return would_pass if candidate["negate"] else not would_pass

    def _tokenize(self, config, target_hostname, key, value):
        """
        Tokenize a string based on current config/hostname data.

        :param config: Current config data.
        :param target_hostname: Original target connection hostname.
        :param key: Config key being tokenized (used to filter token list).
        :param value: Config value being tokenized.

        :returns: The tokenized version of the input ``value`` string.
        """
        allowed_tokens = self._allowed_tokens(key)
        # Short-circuit if no tokenization possible
        if not allowed_tokens:
            return value
        # Obtain potentially configured hostname, for use with %h.
        # Special-case where we are tokenizing the hostname itself, to avoid
        # replacing %h with a %h-bearing value, etc.
        configured_hostname = target_hostname
        if key != "hostname":
            configured_hostname = config.get("hostname", configured_hostname)
        # Ditto the rest of the source values
        if "port" in config:
            port = config["port"]
        else:
            port = SSH_PORT
        user = getpass.getuser()
        if "user" in config:
            remoteuser = config["user"]
        else:
            remoteuser = user
        local_hostname = socket.gethostname().split(".")[0]
        local_fqdn = LazyFqdn(config, local_hostname)
        homedir = os.path.expanduser("~")
        tohash = local_hostname + target_hostname + repr(port) + remoteuser
        # The actual tokens!
        replacements = {
            # TODO: %%???
            "%C": sha1(tohash.encode()).hexdigest(),
            "%d": homedir,
            "%h": configured_hostname,
            # TODO: %i?
            "%L": local_hostname,
            "%l": local_fqdn,
            # also this is pseudo buggy when not in Match exec mode so document
            # that. also WHY is that the case?? don't we do all of this late?
            "%n": target_hostname,
            "%p": port,
            "%r": remoteuser,
            # TODO: %T? don't believe this is possible however
            "%u": user,
            "~": homedir,
        }
        # Do the thing with the stuff
        tokenized = value
        for find, replace in replacements.items():
            if find not in allowed_tokens:
                continue
            tokenized = tokenized.replace(find, str(replace))
        # TODO: log? eg that value -> tokenized
        return tokenized

    def _allowed_tokens(self, key):
        """
        Given config ``key``, return list of token strings to tokenize.

        .. note::
            This feels like it wants to eventually go away, but is used to
            preserve as-strict-as-possible compatibility with OpenSSH, which
            for whatever reason only applies some tokens to some config keys.
        """
        return self.TOKENS_BY_CONFIG_KEY.get(key, [])

    def _expand_variables(self, config, target_hostname):
        """
        Return a dict of config options with expanded substitutions
        for a given original & current target hostname.

        Please refer to :doc:`/api/config` for details.

        :param dict config: the currently parsed config
        :param str target_hostname: the hostname whose config is being
            looked up
        """
        for k in config:
            if config[k] is None:
                continue
            tokenizer = partial(self._tokenize, config, target_hostname, k)
            if isinstance(config[k], list):
                for i, value in enumerate(config[k]):
                    config[k][i] = tokenizer(value)
            else:
                config[k] = tokenizer(config[k])
        return config

    def _get_hosts(self, host):
        """
        Return a list of host_names from host value.

        :raises ConfigParseError: if the host value cannot be shell-split
        """
        try:
            return shlex.split(host)
        except ValueError:
            raise ConfigParseError("Unparsable host {}".format(host))

    def _get_matches(self, match):
        """
        Parse a specific Match config line into a list-of-dicts for its values.

        Performs some parse-time validation as well.
        """
        matches = []
        tokens = shlex.split(match)
        while tokens:
            match = {"type": None, "param": None, "negate": False}
            type_ = tokens.pop(0)
            # Handle per-keyword negation
            if type_.startswith("!"):
                match["negate"] = True
                type_ = type_[1:]
            match["type"] = type_
            # all/canonical have no params (everything else does)
            if type_ in ("all", "canonical", "final"):
                matches.append(match)
                continue
            if not tokens:
                raise ConfigParseError(
                    "Missing parameter to Match '{}' keyword".format(type_)
                )
            match["param"] = tokens.pop(0)
            matches.append(match)
        # Perform some (easier to do now than in the middle) validation that is
        # better handled here than at lookup time.
        keywords = [x["type"] for x in matches]
        if "all" in keywords:
            allowable = ("all", "canonical")
            ok, bad = (
                list(filter(lambda x: x in allowable, keywords)),
                list(filter(lambda x: x not in allowable, keywords)),
            )
            err = None
            if any(bad):
                err = "Match does not allow 'all' mixed with anything but 'canonical'"  # noqa
            elif "canonical" in ok and ok.index("canonical") > ok.index("all"):
                err = "Match does not allow 'all' before 'canonical'"
            if err is not None:
                raise ConfigParseError(err)
        return matches
+
+
def _addressfamily_host_lookup(hostname, options):
    """
    Try looking up ``hostname`` in an IPv4 or IPv6 specific manner.

    This is an odd duck due to needing use in two divergent use cases. It
    looks up ``AddressFamily`` in ``options`` and if it is ``inet`` or
    ``inet6``, this function uses `socket.getaddrinfo` to perform a
    family-specific lookup, returning the result if successful.

    In any other situation -- lookup failure, or ``AddressFamily`` being
    unspecified or ``any`` -- ``None`` is returned instead and the caller is
    expected to do something situation-appropriate like calling
    `socket.gethostbyname`.

    :param str hostname: Hostname to look up.
    :param options: `SSHConfigDict` instance w/ parsed options.
    :returns: ``getaddrinfo``-style tuples, or ``None``, depending.
    """
    family_setting = options.get("addressfamily", "any").lower()
    if family_setting == "any":
        return None
    # Only two specific settings remain: inet (v4) or inet6.
    family = socket.AF_INET if family_setting == "inet" else socket.AF_INET6
    try:
        return socket.getaddrinfo(
            hostname,
            None,
            family,
            socket.SOCK_DGRAM,
            socket.IPPROTO_IP,
            socket.AI_CANONNAME,
        )
    except socket.gaierror:
        # Lookup failed; caller falls back to its own resolution strategy.
        return None
+
+
class LazyFqdn:
    """
    Returns the host's fqdn on request as string.
    """

    def __init__(self, config, host=None):
        # Resolution is deferred until the first str() call, then cached.
        self.fqdn = None
        self.config = config
        self.host = host

    def __str__(self):
        # Fast path: already resolved once.
        if self.fqdn is not None:
            return self.fqdn
        #
        # If the SSH config contains AddressFamily, use that when determining
        # the local host's FQDN. Using socket.getfqdn() from the standard
        # library is the most general solution, but can result in noticeable
        # delays on some platforms when IPv6 is misconfigured or not
        # available, as it calls getaddrinfo with no address family
        # specified, so both IPv4 and IPv6 are checked.
        #
        fqdn = None
        results = _addressfamily_host_lookup(self.host, self.config)
        if results is not None:
            for _af, _socktype, _proto, canonname, _sa in results:
                # First canonical name that looks fully qualified wins.
                if canonname and "." in canonname:
                    fqdn = canonname
                    break
        # Handle 'any' / unspecified / lookup failure.
        if fqdn is None:
            fqdn = socket.getfqdn()
        self.fqdn = fqdn
        return self.fqdn
+
+
class SSHConfigDict(dict):
    """
    A dictionary wrapper/subclass for per-host configuration structures.

    This class introduces some usage niceties for consumers of `SSHConfig`,
    specifically around the issue of variable type conversions: normal value
    access yields strings, but there are now methods such as `as_bool` and
    `as_int` that yield casted values instead.

    For example, given the following ``ssh_config`` file snippet::

        Host foo.example.com
            PasswordAuthentication no
            Compression yes
            ServerAliveInterval 60

    the following code highlights how you can access the raw strings as well
    as usefully Python type-casted versions (recalling that keys are all
    normalized to lowercase first)::

        my_config = SSHConfig()
        my_config.parse(open('~/.ssh/config'))
        conf = my_config.lookup('foo.example.com')

        assert conf['passwordauthentication'] == 'no'
        assert conf.as_bool('passwordauthentication') is False
        assert conf['compression'] == 'yes'
        assert conf.as_bool('compression') is True
        assert conf['serveraliveinterval'] == '60'
        assert conf.as_int('serveraliveinterval') == 60

    .. versionadded:: 2.5
    """

    def as_bool(self, key):
        """
        Express given key's value as a boolean type.

        Typically, this is used for ``ssh_config``'s pseudo-boolean values
        which are either ``"yes"`` or ``"no"``. In such cases, ``"yes"``
        yields ``True`` and any other value becomes ``False``.

        .. note::
            If (for whatever reason) the stored value is already boolean in
            nature, it's simply returned.

        .. versionadded:: 2.5
        """
        value = self[key]
        # Pre-converted booleans pass straight through; otherwise only the
        # exact (case-insensitive) string "yes" counts as True.
        return value if isinstance(value, bool) else value.lower() == "yes"

    def as_int(self, key):
        """
        Express given key's value as an integer, if possible.

        This method will raise ``ValueError`` or similar if the value is not
        int-appropriate, same as the builtin `int` type.

        .. versionadded:: 2.5
        """
        return int(self[key])
diff --git a/lib/paramiko/ecdsakey.py b/lib/paramiko/ecdsakey.py
new file mode 100644
index 0000000..6fd95fa
--- /dev/null
+++ b/lib/paramiko/ecdsakey.py
@@ -0,0 +1,339 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+ECDSA keys
+"""
+
+from cryptography.exceptions import InvalidSignature, UnsupportedAlgorithm
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives.asymmetric.utils import (
+ decode_dss_signature,
+ encode_dss_signature,
+)
+
+from paramiko.common import four_byte
+from paramiko.message import Message
+from paramiko.pkey import PKey
+from paramiko.ssh_exception import SSHException
+from paramiko.util import deflate_long
+
+
class _ECDSACurve:
    """
    Represents a specific ECDSA Curve (nistp256, nistp384, etc).

    Handles the generation of the key format identifier and the selection of
    the proper hash function. Also grabs the proper curve from the 'ecdsa'
    package.
    """

    def __init__(self, curve_class, nist_name):
        self.nist_name = nist_name
        self.key_length = curve_class.key_size
        self.curve_class = curve_class

        # Defined in RFC 5656 6.2
        self.key_format_identifier = "ecdsa-sha2-" + self.nist_name

        # Defined in RFC 5656 6.2.1: the hash strength tracks the curve size.
        if self.key_length <= 256:
            hash_cls = hashes.SHA256
        elif self.key_length <= 384:
            hash_cls = hashes.SHA384
        else:
            hash_cls = hashes.SHA512
        self.hash_object = hash_cls
+
+
class _ECDSACurveSet:
    """
    A collection to hold the ECDSA curves. Allows querying by oid and by key
    format identifier. The two ways in which ECDSAKey needs to be able to
    look up curves.
    """

    def __init__(self, ecdsa_curves):
        self.ecdsa_curves = ecdsa_curves

    def get_key_format_identifier_list(self):
        # One identifier per known curve, in registration order.
        return [curve.key_format_identifier for curve in self.ecdsa_curves]

    def get_by_curve_class(self, curve_class):
        # Returns None when no curve matches (next() default).
        return next(
            (c for c in self.ecdsa_curves if c.curve_class == curve_class),
            None,
        )

    def get_by_key_format_identifier(self, key_format_identifier):
        return next(
            (
                c
                for c in self.ecdsa_curves
                if c.key_format_identifier == key_format_identifier
            ),
            None,
        )

    def get_by_key_length(self, key_length):
        return next(
            (c for c in self.ecdsa_curves if c.key_length == key_length),
            None,
        )
+
+
class ECDSAKey(PKey):
    """
    Representation of an ECDSA key which can be used to sign and verify SSH2
    data.
    """

    # Curves supported for SSH use; queried both by curve class and by the
    # "ecdsa-sha2-nistpXXX" key format identifier (RFC 5656).
    _ECDSA_CURVES = _ECDSACurveSet(
        [
            _ECDSACurve(ec.SECP256R1, "nistp256"),
            _ECDSACurve(ec.SECP384R1, "nistp384"),
            _ECDSACurve(ec.SECP521R1, "nistp521"),
        ]
    )

    def __init__(
        self,
        msg=None,
        data=None,
        filename=None,
        password=None,
        vals=None,
        file_obj=None,
        # TODO 4.0: remove; it does nothing since porting to cryptography.io
        validate_point=True,
    ):
        """
        Build a key from exactly one of several sources: a parsed ``msg``,
        raw ``data`` bytes, a private key ``filename`` or ``file_obj``
        (optionally decrypted with ``password``), or a ``vals`` tuple of
        (signing key, verifying key).

        :raises SSHException: if a public blob names an unknown curve or
            holds an invalid encoded point.
        """
        self.verifying_key = None
        self.signing_key = None
        self.public_blob = None
        # File-based paths return early; only blob/vals paths continue below.
        if file_obj is not None:
            self._from_private_key(file_obj, password)
            return
        if filename is not None:
            self._from_private_key_file(filename, password)
            return
        if (msg is None) and (data is not None):
            msg = Message(data)
        if vals is not None:
            self.signing_key, self.verifying_key = vals
            c_class = self.signing_key.curve.__class__
            self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(c_class)
        else:
            # Must set ecdsa_curve first; subroutines called herein may need to
            # spit out our get_name(), which relies on this.
            key_type = msg.get_text()
            # But this also means we need to hand it a real key/curve
            # identifier, so strip out any cert business. (NOTE: could push
            # that into _ECDSACurveSet.get_by_key_format_identifier(), but it
            # feels more correct to do it here?)
            suffix = "-cert-v01@openssh.com"
            if key_type.endswith(suffix):
                key_type = key_type[: -len(suffix)]
            self.ecdsa_curve = self._ECDSA_CURVES.get_by_key_format_identifier(
                key_type
            )
            key_types = self._ECDSA_CURVES.get_key_format_identifier_list()
            cert_types = [
                "{}-cert-v01@openssh.com".format(x) for x in key_types
            ]
            self._check_type_and_load_cert(
                msg=msg, key_type=key_types, cert_type=cert_types
            )
            # The blob embeds the curve name again; it must agree with the
            # key format identifier parsed above.
            curvename = msg.get_text()
            if curvename != self.ecdsa_curve.nist_name:
                raise SSHException(
                    "Can't handle curve of type {}".format(curvename)
                )

            pointinfo = msg.get_binary()
            try:
                key = ec.EllipticCurvePublicKey.from_encoded_point(
                    self.ecdsa_curve.curve_class(), pointinfo
                )
                self.verifying_key = key
            except ValueError:
                raise SSHException("Invalid public key")

    @classmethod
    def identifiers(cls):
        """Return the list of SSH key type names this class handles."""
        return cls._ECDSA_CURVES.get_key_format_identifier_list()

    # TODO 4.0: deprecate/remove
    @classmethod
    def supported_key_format_identifiers(cls):
        """Legacy alias for `identifiers`."""
        return cls.identifiers()

    def asbytes(self):
        """
        Return the public half serialized in SSH2 wire format:
        identifier string, curve name, then the encoded EC point.
        """
        key = self.verifying_key
        m = Message()
        m.add_string(self.ecdsa_curve.key_format_identifier)
        m.add_string(self.ecdsa_curve.nist_name)

        numbers = key.public_numbers()

        # Fixed-width big-endian encoding of x and y, left-padded with
        # zeroes to the curve's byte length.
        key_size_bytes = (key.curve.key_size + 7) // 8

        x_bytes = deflate_long(numbers.x, add_sign_padding=False)
        x_bytes = b"\x00" * (key_size_bytes - len(x_bytes)) + x_bytes

        y_bytes = deflate_long(numbers.y, add_sign_padding=False)
        y_bytes = b"\x00" * (key_size_bytes - len(y_bytes)) + y_bytes

        # four_byte == b"\x04": uncompressed-point marker.
        point_str = four_byte + x_bytes + y_bytes
        m.add_string(point_str)
        return m.asbytes()

    def __str__(self):
        # NOTE(review): returns bytes, so str(key) raises TypeError on
        # Python 3 — appears to be a Python 2 leftover; confirm before use.
        return self.asbytes()

    @property
    def _fields(self):
        # Tuple used by PKey equality/hashing machinery.
        return (
            self.get_name(),
            self.verifying_key.public_numbers().x,
            self.verifying_key.public_numbers().y,
        )

    def get_name(self):
        """Return the SSH key type name, e.g. ``ecdsa-sha2-nistp256``."""
        return self.ecdsa_curve.key_format_identifier

    def get_bits(self):
        """Return the curve's key length in bits."""
        return self.ecdsa_curve.key_length

    def can_sign(self):
        """Return ``True`` if the private half is loaded."""
        return self.signing_key is not None

    def sign_ssh_data(self, data, algorithm=None):
        """
        Sign ``data`` with the private key and return the signature as an
        SSH2 `.Message` (identifier + mpint-encoded r,s pair).
        """
        ecdsa = ec.ECDSA(self.ecdsa_curve.hash_object())
        sig = self.signing_key.sign(data, ecdsa)
        # Convert from DER to the SSH (r, s) wire encoding.
        r, s = decode_dss_signature(sig)

        m = Message()
        m.add_string(self.ecdsa_curve.key_format_identifier)
        m.add_string(self._sigencode(r, s))
        return m

    def verify_ssh_sig(self, data, msg):
        """
        Return ``True`` iff ``msg`` holds a valid signature over ``data``
        made by this key's public half.
        """
        if msg.get_text() != self.ecdsa_curve.key_format_identifier:
            return False
        sig = msg.get_binary()
        sigR, sigS = self._sigdecode(sig)
        signature = encode_dss_signature(sigR, sigS)

        try:
            self.verifying_key.verify(
                signature, data, ec.ECDSA(self.ecdsa_curve.hash_object())
            )
        except InvalidSignature:
            return False
        else:
            return True

    def write_private_key_file(self, filename, password=None):
        """Write the private key to ``filename`` in PEM (traditional) form."""
        self._write_private_key_file(
            filename,
            self.signing_key,
            serialization.PrivateFormat.TraditionalOpenSSL,
            password=password,
        )

    def write_private_key(self, file_obj, password=None):
        """Write the private key to an open file object in PEM form."""
        self._write_private_key(
            file_obj,
            self.signing_key,
            serialization.PrivateFormat.TraditionalOpenSSL,
            password=password,
        )

    @classmethod
    def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None):
        """
        Generate a new private ECDSA key. This factory function can be used to
        generate a new host key or authentication key.

        :param progress_func: Not used for this type of key.
        :returns: A new private key (`.ECDSAKey`) object
        """
        # ``bits`` (if given) overrides ``curve`` by length lookup.
        if bits is not None:
            curve = cls._ECDSA_CURVES.get_by_key_length(bits)
            if curve is None:
                raise ValueError("Unsupported key length: {:d}".format(bits))
            curve = curve.curve_class()

        private_key = ec.generate_private_key(curve, backend=default_backend())
        return ECDSAKey(vals=(private_key, private_key.public_key()))

    # ...internals...

    def _from_private_key_file(self, filename, password):
        # Read + decrypt the on-disk form, then decode into key objects.
        data = self._read_private_key_file("EC", filename, password)
        self._decode_key(data)

    def _from_private_key(self, file_obj, password):
        data = self._read_private_key("EC", file_obj, password)
        self._decode_key(data)

    def _decode_key(self, data):
        """
        Decode a (format, bytes) pair produced by the read helpers into
        signing/verifying key objects; supports classic DER and the newer
        OpenSSH private key container.
        """
        pkformat, data = data
        if pkformat == self._PRIVATE_KEY_FORMAT_ORIGINAL:
            try:
                key = serialization.load_der_private_key(
                    data, password=None, backend=default_backend()
                )
            except (
                ValueError,
                AssertionError,
                TypeError,
                UnsupportedAlgorithm,
            ) as e:
                raise SSHException(str(e))
        elif pkformat == self._PRIVATE_KEY_FORMAT_OPENSSH:
            try:
                msg = Message(data)
                curve_name = msg.get_text()
                verkey = msg.get_binary()  # noqa: F841
                sigkey = msg.get_mpint()
                name = "ecdsa-sha2-" + curve_name
                curve = self._ECDSA_CURVES.get_by_key_format_identifier(name)
                if not curve:
                    raise SSHException("Invalid key curve identifier")
                # Rebuild the private key from its scalar + curve.
                key = ec.derive_private_key(
                    sigkey, curve.curve_class(), default_backend()
                )
            except Exception as e:
                # PKey._read_private_key_openssh() should check or return
                # keytype - parsing could fail for any reason due to wrong type
                raise SSHException(str(e))
        else:
            self._got_bad_key_format_id(pkformat)

        self.signing_key = key
        self.verifying_key = key.public_key()
        curve_class = key.curve.__class__
        self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(curve_class)

    def _sigencode(self, r, s):
        # SSH wire encoding of a signature: two mpints.
        msg = Message()
        msg.add_mpint(r)
        msg.add_mpint(s)
        return msg.asbytes()

    def _sigdecode(self, sig):
        # Inverse of _sigencode.
        msg = Message(sig)
        r = msg.get_mpint()
        s = msg.get_mpint()
        return r, s
diff --git a/lib/paramiko/ed25519key.py b/lib/paramiko/ed25519key.py
new file mode 100644
index 0000000..e5e81ac
--- /dev/null
+++ b/lib/paramiko/ed25519key.py
@@ -0,0 +1,212 @@
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import bcrypt
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import Cipher
+
+import nacl.signing
+
+from paramiko.message import Message
+from paramiko.pkey import PKey, OPENSSH_AUTH_MAGIC, _unpad_openssh
+from paramiko.util import b
+from paramiko.ssh_exception import SSHException, PasswordRequiredException
+
+
class Ed25519Key(PKey):
    """
    Representation of an `Ed25519 <https://ed25519.cr.yp.to/>`_ key.

    .. note::
        Ed25519 key support was added to OpenSSH in version 6.5.

    .. versionadded:: 2.2
    .. versionchanged:: 2.3
        Added a ``file_obj`` parameter to match other key classes.
    """

    name = "ssh-ed25519"

    def __init__(
        self, msg=None, data=None, filename=None, password=None, file_obj=None
    ):
        """
        Build a key from exactly one source: an SSH2 ``msg``/``data`` public
        blob, or a private key ``filename``/``file_obj`` in OpenSSH format
        (optionally encrypted with ``password``).

        :raises ValueError: if no usable source was supplied.
        """
        self.public_blob = None
        verifying_key = signing_key = None
        if msg is None and data is not None:
            msg = Message(data)
        if msg is not None:
            self._check_type_and_load_cert(
                msg=msg,
                key_type=self.name,
                cert_type="ssh-ed25519-cert-v01@openssh.com",
            )
            verifying_key = nacl.signing.VerifyKey(msg.get_binary())
        elif filename is not None:
            with open(filename, "r") as f:
                pkformat, data = self._read_private_key("OPENSSH", f)
        elif file_obj is not None:
            pkformat, data = self._read_private_key("OPENSSH", file_obj)

        # Private-key sources yield raw container bytes to parse further.
        if filename or file_obj:
            signing_key = self._parse_signing_key_data(data, password)

        if signing_key is None and verifying_key is None:
            raise ValueError("need a key")

        self._signing_key = signing_key
        self._verifying_key = verifying_key

    def _parse_signing_key_data(self, data, password):
        """
        Parse an openssh-key-v1 private key container and return the single
        `nacl.signing.SigningKey` it holds.

        :raises PasswordRequiredException: if encrypted and no password given.
        :raises SSHException: on any structural problem with the container.
        """
        from paramiko.transport import Transport

        # We may eventually want this to be usable for other key types, as
        # OpenSSH moves to it, but for now this is just for Ed25519 keys.
        # This format is described here:
        # https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
        # The description isn't totally complete, and I had to refer to the
        # source for a full implementation.
        message = Message(data)
        if message.get_bytes(len(OPENSSH_AUTH_MAGIC)) != OPENSSH_AUTH_MAGIC:
            raise SSHException("Invalid key")

        ciphername = message.get_text()
        kdfname = message.get_text()
        kdfoptions = message.get_binary()
        num_keys = message.get_int()

        if kdfname == "none":
            # kdfname of "none" must have an empty kdfoptions, the ciphername
            # must be "none"
            if kdfoptions or ciphername != "none":
                raise SSHException("Invalid key")
        elif kdfname == "bcrypt":
            if not password:
                raise PasswordRequiredException(
                    "Private key file is encrypted"
                )
            kdf = Message(kdfoptions)
            bcrypt_salt = kdf.get_binary()
            bcrypt_rounds = kdf.get_int()
        else:
            raise SSHException("Invalid key")

        if ciphername != "none" and ciphername not in Transport._cipher_info:
            raise SSHException("Invalid key")

        # Public-key section: one blob per key, each tagged with its type.
        public_keys = []
        for _ in range(num_keys):
            pubkey = Message(message.get_binary())
            if pubkey.get_text() != self.name:
                raise SSHException("Invalid key")
            public_keys.append(pubkey.get_binary())

        private_ciphertext = message.get_binary()
        if ciphername == "none":
            private_data = private_ciphertext
        else:
            # Derive cipher key + IV from the password via bcrypt KDF, then
            # decrypt the private section.
            cipher = Transport._cipher_info[ciphername]
            key = bcrypt.kdf(
                password=b(password),
                salt=bcrypt_salt,
                desired_key_bytes=cipher["key-size"] + cipher["block-size"],
                rounds=bcrypt_rounds,
                # We can't control how many rounds are on disk, so no sense
                # warning about it.
                ignore_few_rounds=True,
            )
            decryptor = Cipher(
                cipher["class"](key[: cipher["key-size"]]),
                cipher["mode"](key[cipher["key-size"] :]),
                backend=default_backend(),
            ).decryptor()
            private_data = (
                decryptor.update(private_ciphertext) + decryptor.finalize()
            )

        # The two leading "checkint" values must match; mismatch implies a
        # wrong password or corrupt data.
        message = Message(_unpad_openssh(private_data))
        if message.get_int() != message.get_int():
            raise SSHException("Invalid key")

        signing_keys = []
        for i in range(num_keys):
            if message.get_text() != self.name:
                raise SSHException("Invalid key")
            # A copy of the public key, again, ignore.
            public = message.get_binary()
            key_data = message.get_binary()
            # The second half of the key data is yet another copy of the public
            # key...
            signing_key = nacl.signing.SigningKey(key_data[:32])
            # Verify that all the public keys are the same...
            assert (
                signing_key.verify_key.encode()
                == public
                == public_keys[i]
                == key_data[32:]
            )
            signing_keys.append(signing_key)
            # Comment, ignore.
            message.get_binary()

        if len(signing_keys) != 1:
            raise SSHException("Invalid key")
        return signing_keys[0]

    def asbytes(self):
        """Return the public key in SSH2 wire format (type + raw key)."""
        if self.can_sign():
            v = self._signing_key.verify_key
        else:
            v = self._verifying_key
        m = Message()
        m.add_string(self.name)
        m.add_string(v.encode())
        return m.asbytes()

    @property
    def _fields(self):
        # Tuple used by PKey equality/hashing machinery.
        if self.can_sign():
            v = self._signing_key.verify_key
        else:
            v = self._verifying_key
        return (self.get_name(), v)

    # TODO 4.0: remove
    def get_name(self):
        """Return the SSH key type name (``ssh-ed25519``)."""
        return self.name

    def get_bits(self):
        """Return the key size in bits; Ed25519 keys are always 256-bit."""
        return 256

    def can_sign(self):
        """Return ``True`` if the private half is loaded."""
        return self._signing_key is not None

    def sign_ssh_data(self, data, algorithm=None):
        """Sign ``data`` and return the signature as an SSH2 `.Message`."""
        m = Message()
        m.add_string(self.name)
        m.add_string(self._signing_key.sign(data).signature)
        return m

    def verify_ssh_sig(self, data, msg):
        """
        Return ``True`` iff ``msg`` holds a valid signature over ``data``.
        """
        if msg.get_text() != self.name:
            return False

        try:
            # NOTE(review): nacl.exceptions is only reachable because
            # nacl.signing imports it internally — confirm if tightening
            # imports.
            self._verifying_key.verify(data, msg.get_binary())
        except nacl.exceptions.BadSignatureError:
            return False
        else:
            return True
diff --git a/lib/paramiko/file.py b/lib/paramiko/file.py
new file mode 100644
index 0000000..a36abb9
--- /dev/null
+++ b/lib/paramiko/file.py
@@ -0,0 +1,528 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+from io import BytesIO
+
+from paramiko.common import (
+ linefeed_byte_value,
+ crlf,
+ cr_byte,
+ linefeed_byte,
+ cr_byte_value,
+)
+
+from paramiko.util import ClosingContextManager, u
+
+
class BufferedFile(ClosingContextManager):
    """
    Reusable base class to implement Python-style file buffering around a
    simpler stream.
    """

    # Default chunk size (bytes) for reads and the write-buffer flush limit.
    _DEFAULT_BUFSIZE = 8192

    # seek() whence values (mirroring os.SEEK_SET/CUR/END).
    SEEK_SET = 0
    SEEK_CUR = 1
    SEEK_END = 2

    # Mode flags OR'd together into self._flags by subclasses.
    FLAG_READ = 0x1
    FLAG_WRITE = 0x2
    FLAG_APPEND = 0x4
    FLAG_BINARY = 0x10
    FLAG_BUFFERED = 0x20
    FLAG_LINE_BUFFERED = 0x40
    FLAG_UNIVERSAL_NEWLINE = 0x80
+
def __init__(self):
    """Initialize buffering state; subclasses set ``_flags`` afterwards."""
    # Newline styles seen so far (None, bytes, or tuple) for universal mode.
    self.newlines = None
    self._flags = 0
    self._bufsize = self._DEFAULT_BUFSIZE
    # Write buffer (pending outgoing data) and read-ahead buffer.
    self._wbuffer = BytesIO()
    self._rbuffer = bytes()
    # True when the last read ended on '\r' that may start a '\r\n' pair.
    self._at_trailing_cr = False
    self._closed = False
    # pos - position within the file, according to the user
    # realpos - position according the OS
    # (these may be different because we buffer for line reading)
    self._pos = self._realpos = 0
    # size only matters for seekable files
    self._size = 0
+
def __del__(self):
    # Ensure buffered write data is flushed when the object is collected.
    self.close()
+
def __iter__(self):
    """
    Returns an iterator that can be used to iterate over the lines in this
    file. This iterator happens to return the file itself, since a file is
    its own iterator.

    :raises: ``ValueError`` -- if the file is closed.
    """
    if self._closed:
        raise ValueError("I/O operation on closed file")
    return self
+
def close(self):
    """
    Close the file. Future read and write operations will fail.

    Any buffered write data is flushed first.
    """
    self.flush()
    self._closed = True
+
def flush(self):
    """
    Write out any data in the write buffer. This may do nothing if write
    buffering is not turned on.
    """
    pending = self._wbuffer.getvalue()
    # Push everything downstream first; only reset the buffer afterwards
    # so a failing write does not silently drop data.
    self._write_all(pending)
    self._wbuffer = BytesIO()
+
def __next__(self):
    """
    Returns the next line from the input, or raises ``StopIteration``
    when EOF is hit. Unlike python file objects, it's okay to mix
    calls to `.next` and `.readline`.

    :raises: ``StopIteration`` -- when the end of the file is reached.

    :returns:
        a line (`str`, or `bytes` if the file was opened in binary mode)
        read from the file.
    """
    line = self.readline()
    # readline() returns an empty string/bytes only at EOF.
    if not line:
        raise StopIteration
    return line
+
def readable(self):
    """
    Check if the file can be read from.

    :returns:
        `True` if the file can be read from. If `False`, `read` will raise
        an exception.
    """
    # FLAG_READ is a single bit, so truthiness of the mask is equivalent
    # to an exact comparison.
    return bool(self._flags & self.FLAG_READ)
+
def writable(self):
    """
    Check if the file can be written to.

    :returns:
        `True` if the file can be written to. If `False`, `write` will
        raise an exception.
    """
    # Single-bit flag: truthiness equals exact-bit comparison.
    return bool(self._flags & self.FLAG_WRITE)
+
def seekable(self):
    """
    Check if the file supports random access.

    :returns:
        `True` if the file supports random access. If `False`, `seek` will
        raise an exception. This base class never supports seeking;
        subclasses that do must override.
    """
    return False
+
def readinto(self, buff):
    """
    Read up to ``len(buff)`` bytes into ``bytearray`` *buff* and return the
    number of bytes read.

    :returns:
        The number of bytes read.
    """
    data = self.read(len(buff))
    count = len(data)
    # Copy into the caller's buffer in place; trailing bytes are untouched.
    buff[:count] = data
    return count
+
def read(self, size=None):
    """
    Read at most ``size`` bytes from the file (less if we hit the end of
    the file first). If the ``size`` argument is negative or omitted,
    read all the remaining data in the file.

    .. note::
        ``'b'`` mode flag is ignored (``self.FLAG_BINARY`` in
        ``self._flags``), because SSH treats all files as binary, since we
        have no idea what encoding the file is in, or even if the file is
        text data.

    :param int size: maximum number of bytes to read
    :returns:
        data read from the file (as bytes), or empty bytes if EOF was
        encountered immediately
    """
    if self._closed:
        raise IOError("File is closed")
    if not (self._flags & self.FLAG_READ):
        raise IOError("File is not open for reading")
    if (size is None) or (size < 0):
        # go for broke: drain the read-ahead buffer, then the stream.
        result = bytearray(self._rbuffer)
        self._rbuffer = bytes()
        self._pos += len(result)
        while True:
            try:
                new_data = self._read(self._DEFAULT_BUFSIZE)
            except EOFError:
                new_data = None
            if (new_data is None) or (len(new_data) == 0):
                break
            result.extend(new_data)
            self._realpos += len(new_data)
            self._pos += len(new_data)
        return bytes(result)
    if size <= len(self._rbuffer):
        # Entire request is satisfiable from the read-ahead buffer.
        result = self._rbuffer[:size]
        self._rbuffer = self._rbuffer[size:]
        self._pos += len(result)
        return result
    # Refill the buffer until we have enough or hit EOF; when buffered
    # mode is on, over-read up to _bufsize to amortize stream calls.
    while len(self._rbuffer) < size:
        read_size = size - len(self._rbuffer)
        if self._flags & self.FLAG_BUFFERED:
            read_size = max(self._bufsize, read_size)
        try:
            new_data = self._read(read_size)
        except EOFError:
            new_data = None
        if (new_data is None) or (len(new_data) == 0):
            break
        self._rbuffer += new_data
        self._realpos += len(new_data)
    result = self._rbuffer[:size]
    self._rbuffer = self._rbuffer[size:]
    self._pos += len(result)
    return result
+
def readline(self, size=None):
    """
    Read one entire line from the file. A trailing newline character is
    kept in the string (but may be absent when a file ends with an
    incomplete line). If the size argument is present and non-negative, it
    is a maximum byte count (including the trailing newline) and an
    incomplete line may be returned. An empty string is returned only when
    EOF is encountered immediately.

    .. note::
        Unlike stdio's ``fgets``, the returned string contains null
        characters (``'\\0'``) if they occurred in the input.

    :param int size: maximum length of returned string.
    :returns:
        next line of the file, or an empty string if the end of the
        file has been reached.

        If the file was opened in binary (``'b'``) mode: bytes are returned
        Else: the encoding of the file is assumed to be UTF-8 and character
        strings (`str`) are returned
    """
    # it's almost silly how complex this function is.
    if self._closed:
        raise IOError("File is closed")
    if not (self._flags & self.FLAG_READ):
        raise IOError("File not open for reading")
    line = self._rbuffer
    truncated = False
    while True:
        if (
            self._at_trailing_cr
            and self._flags & self.FLAG_UNIVERSAL_NEWLINE
            and len(line) > 0
        ):
            # edge case: the newline may be '\r\n' and we may have read
            # only the first '\r' last time.
            if line[0] == linefeed_byte_value:
                line = line[1:]
                self._record_newline(crlf)
            else:
                self._record_newline(cr_byte)
            self._at_trailing_cr = False
        # check size before looking for a linefeed, in case we already have
        # enough.
        if (size is not None) and (size >= 0):
            if len(line) >= size:
                # truncate line
                self._rbuffer = line[size:]
                line = line[:size]
                truncated = True
                break
            n = size - len(line)
        else:
            n = self._bufsize
        # Stop refilling once a newline (of any accepted style) is present.
        if linefeed_byte in line or (
            self._flags & self.FLAG_UNIVERSAL_NEWLINE and cr_byte in line
        ):
            break
        try:
            new_data = self._read(n)
        except EOFError:
            new_data = None
        if (new_data is None) or (len(new_data) == 0):
            # EOF: return whatever partial line we accumulated.
            self._rbuffer = bytes()
            self._pos += len(line)
            return line if self._flags & self.FLAG_BINARY else u(line)
        line += new_data
        self._realpos += len(new_data)
    # find the newline
    pos = line.find(linefeed_byte)
    if self._flags & self.FLAG_UNIVERSAL_NEWLINE:
        rpos = line.find(cr_byte)
        # prefer whichever newline occurs first.
        if (rpos >= 0) and (rpos < pos or pos < 0):
            pos = rpos
    if pos == -1:
        # we couldn't find a newline in the truncated string, return it
        self._pos += len(line)
        return line if self._flags & self.FLAG_BINARY else u(line)
    xpos = pos + 1
    # Consume a '\n' that directly follows '\r' (a CRLF pair).
    if (
        line[pos] == cr_byte_value
        and xpos < len(line)
        and line[xpos] == linefeed_byte_value
    ):
        xpos += 1
    # if the string was truncated, _rbuffer needs to have the string after
    # the newline character plus the truncated part of the line we stored
    # earlier in _rbuffer
    if truncated:
        self._rbuffer = line[xpos:] + self._rbuffer
    else:
        self._rbuffer = line[xpos:]

    lf = line[pos:xpos]
    # Normalize the returned line terminator to '\n'.
    line = line[:pos] + linefeed_byte
    if (len(self._rbuffer) == 0) and (lf == cr_byte):
        # we could read the line up to a '\r' and there could still be a
        # '\n' following that we read next time. note that and eat it.
        self._at_trailing_cr = True
    else:
        self._record_newline(lf)
    self._pos += len(line)
    return line if self._flags & self.FLAG_BINARY else u(line)
+
def readlines(self, sizehint=None):
    """
    Read all remaining lines using `readline` and return them as a list.
    If the optional ``sizehint`` argument is present, instead of reading up
    to EOF, whole lines totalling approximately sizehint bytes (possibly
    after rounding up to an internal buffer size) are read.

    :param int sizehint: desired maximum number of bytes to read.
    :returns: list of lines read from the file.
    """
    lines = []
    total = 0
    while True:
        line = self.readline()
        # Empty result means EOF.
        if not line:
            break
        lines.append(line)
        total += len(line)
        # Only whole lines are returned, so the hint is approximate.
        if sizehint is not None and total >= sizehint:
            break
    return lines
+
def seek(self, offset, whence=0):
    """
    Set the file's current position, like stdio's ``fseek``. Not all file
    objects support seeking.

    .. note::
        If a file is opened in append mode (``'a'`` or ``'a+'``), any seek
        operations will be undone at the next write (as the file position
        will move back to the end of the file).

    :param int offset:
        position to move to within the file, relative to ``whence``.
    :param int whence:
        type of movement: 0 = absolute; 1 = relative to the current
        position; 2 = relative to the end of the file.

    :raises: ``IOError`` -- if the file doesn't support random access.
    """
    # The base class is unseekable; subclasses supporting random access
    # must override this method.
    raise IOError("File does not support seeking.")
+
def tell(self):
    """
    Return the file's current position. This may not be accurate or
    useful if the underlying file doesn't support random access, or was
    opened in append mode.

    :returns: file position (number of bytes).
    """
    # User-visible position, maintained by the read/write paths.
    return self._pos
+
def write(self, data):
    """
    Write data to the file. If write buffering is on (``bufsize`` was
    specified and non-zero), some or all of the data may not actually be
    written yet. (Use `flush` or `close` to force buffered data to be
    written out.)

    :param data: ``str``/``bytes`` data to write
    """
    if isinstance(data, str):
        # Accept text and encode as utf-8 for compatibility only.
        data = data.encode("utf-8")
    if self._closed:
        raise IOError("File is closed")
    if not (self._flags & self.FLAG_WRITE):
        raise IOError("File not open for writing")
    if not (self._flags & self.FLAG_BUFFERED):
        # Unbuffered: push straight through.
        self._write_all(data)
        return
    self._wbuffer.write(data)
    if self._flags & self.FLAG_LINE_BUFFERED:
        # only scan the new data for linefeed, to avoid wasting time.
        last_newline_pos = data.rfind(linefeed_byte)
        if last_newline_pos >= 0:
            # Flush everything through the final newline; keep the
            # remainder buffered for the next write.
            wbuf = self._wbuffer.getvalue()
            last_newline_pos += len(wbuf) - len(data)
            self._write_all(wbuf[: last_newline_pos + 1])
            self._wbuffer = BytesIO()
            self._wbuffer.write(wbuf[last_newline_pos + 1 :])
        return
    # even if we're line buffering, if the buffer has grown past the
    # buffer size, force a flush.
    if self._wbuffer.tell() >= self._bufsize:
        self.flush()
    return
+
def writelines(self, sequence):
    """Write each string from an iterable to the file.

    No line separators are added -- the name simply mirrors `readlines`.
    The iterable may be any object producing strings, typically a list.

    :param sequence: an iterable sequence of strings.
    """
    for item in sequence:
        self.write(item)
+
def xreadlines(self):
    """Return the file itself as a line iterator.

    Equivalent to ``iter(f)``; kept only for the deprecated pre-iterator
    file interface.
    """
    return self
+
@property
def closed(self):
    # True once the file has been closed; writes then raise IOError.
    return self._closed
+
+ # ...overrides...
+
+ def _read(self, size):
+ """
+ (subclass override)
+ Read data from the stream. Return ``None`` or raise ``EOFError`` to
+ indicate EOF.
+ """
+ raise EOFError()
+
+ def _write(self, data):
+ """
+ (subclass override)
+ Write data into the stream.
+ """
+ raise IOError("write not implemented")
+
+ def _get_size(self):
+ """
+ (subclass override)
+ Return the size of the file. This is called from within `_set_mode`
+ if the file is opened in append mode, so the file position can be
+ tracked and `seek` and `tell` will work correctly. If the file is
+ a stream that can't be randomly accessed, you don't need to override
+ this method,
+ """
+ return 0
+
+ # ...internals...
+
+ def _set_mode(self, mode="r", bufsize=-1):
+ """
+ Subclasses call this method to initialize the BufferedFile.
+ """
+ # set bufsize in any event, because it's used for readline().
+ self._bufsize = self._DEFAULT_BUFSIZE
+ if bufsize < 0:
+ # do no buffering by default, because otherwise writes will get
+ # buffered in a way that will probably confuse people.
+ bufsize = 0
+ if bufsize == 1:
+ # apparently, line buffering only affects writes. reads are only
+ # buffered if you call readline (directly or indirectly: iterating
+ # over a file will indirectly call readline).
+ self._flags |= self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED
+ elif bufsize > 1:
+ self._bufsize = bufsize
+ self._flags |= self.FLAG_BUFFERED
+ self._flags &= ~self.FLAG_LINE_BUFFERED
+ elif bufsize == 0:
+ # unbuffered
+ self._flags &= ~(self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED)
+
+ if ("r" in mode) or ("+" in mode):
+ self._flags |= self.FLAG_READ
+ if ("w" in mode) or ("+" in mode):
+ self._flags |= self.FLAG_WRITE
+ if "a" in mode:
+ self._flags |= self.FLAG_WRITE | self.FLAG_APPEND
+ self._size = self._get_size()
+ self._pos = self._realpos = self._size
+ if "b" in mode:
+ self._flags |= self.FLAG_BINARY
+ if "U" in mode:
+ self._flags |= self.FLAG_UNIVERSAL_NEWLINE
+ # built-in file objects have this attribute to store which kinds of
+ # line terminations they've seen:
+ #
+ self.newlines = None
+
+ def _write_all(self, raw_data):
+ # the underlying stream may be something that does partial writes (like
+ # a socket).
+ data = memoryview(raw_data)
+ while len(data) > 0:
+ count = self._write(data)
+ data = data[count:]
+ if self._flags & self.FLAG_APPEND:
+ self._size += count
+ self._pos = self._realpos = self._size
+ else:
+ self._pos += count
+ self._realpos += count
+ return None
+
+ def _record_newline(self, newline):
+ # silliness about tracking what kinds of newlines we've seen.
+ # i don't understand why it can be None, a string, or a tuple, instead
+ # of just always being a tuple, but we'll emulate that behavior anyway.
+ if not (self._flags & self.FLAG_UNIVERSAL_NEWLINE):
+ return
+ if self.newlines is None:
+ self.newlines = newline
+ elif self.newlines != newline and isinstance(self.newlines, bytes):
+ self.newlines = (self.newlines, newline)
+ elif newline not in self.newlines:
+ self.newlines += (newline,)
diff --git a/lib/paramiko/hostkeys.py b/lib/paramiko/hostkeys.py
new file mode 100644
index 0000000..0bcf6c3
--- /dev/null
+++ b/lib/paramiko/hostkeys.py
@@ -0,0 +1,384 @@
+# Copyright (C) 2006-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+from base64 import encodebytes, decodebytes
+import binascii
+import os
+import re
+
+from collections.abc import MutableMapping
+from hashlib import sha1
+from hmac import HMAC
+
+
+from paramiko.pkey import PKey, UnknownKeyType
+from paramiko.util import get_logger, constant_time_bytes_eq, b, u
+from paramiko.ssh_exception import SSHException
+
+
class HostKeys(MutableMapping):
    """
    Representation of an OpenSSH-style "known hosts" file.  Host keys can be
    read from one or more files, and then individual hosts can be looked up to
    verify server keys during SSH negotiation.

    A `.HostKeys` object can be treated like a dict; any dict lookup is
    equivalent to calling `lookup`.

    .. versionadded:: 1.5.3
    """

    def __init__(self, filename=None):
        """
        Create a new HostKeys object, optionally loading keys from an OpenSSH
        style host-key file.

        :param str filename: filename to load host keys from, or ``None``
        """
        # emulate a dict of { hostname: { keytype: PKey } }
        self._entries = []
        if filename is not None:
            self.load(filename)

    def add(self, hostname, keytype, key):
        """
        Add a host key entry to the table.  Any existing entry for a
        ``(hostname, keytype)`` pair will be replaced.

        :param str hostname: the hostname (or IP) to add
        :param str keytype: key type (in ``"ssh-"`` format)
        :param .PKey key: the key to add
        """
        for e in self._entries:
            if (hostname in e.hostnames) and (e.key.get_name() == keytype):
                e.key = key
                return
        self._entries.append(HostKeyEntry([hostname], key))

    def load(self, filename):
        """
        Read a file of known SSH host keys, in the format used by OpenSSH.
        This type of file unfortunately doesn't exist on Windows, but on
        posix, it will usually be stored in
        ``os.path.expanduser("~/.ssh/known_hosts")``.

        If this method is called multiple times, the host keys are merged,
        not cleared.  So multiple calls to `load` will just call `add`,
        replacing any existing entries and adding new ones.

        :param str filename: name of the file to read host keys from

        :raises: ``IOError`` -- if there was an error reading the file
        """
        with open(filename, "r") as f:
            for lineno, line in enumerate(f, 1):
                line = line.strip()
                if (len(line) == 0) or (line[0] == "#"):
                    continue
                try:
                    entry = HostKeyEntry.from_line(line, lineno)
                except SSHException:
                    continue
                if entry is not None:
                    # BUG FIX: iterate over a snapshot of the hostname list.
                    # The original iterated entry.hostnames itself (via a
                    # mere alias) while remove() mutated it, which makes the
                    # loop skip the element following each removal.
                    for h in list(entry.hostnames):
                        if self.check(h, entry.key):
                            # hostname already has this key; drop duplicate
                            entry.hostnames.remove(h)
                    if len(entry.hostnames):
                        self._entries.append(entry)

    def save(self, filename):
        """
        Save host keys into a file, in the format used by OpenSSH.  The order
        of keys in the file will be preserved when possible (if these keys were
        loaded from a file originally).  The single exception is that combined
        lines will be split into individual key lines, which is arguably a bug.

        :param str filename: name of the file to write

        :raises: ``IOError`` -- if there was an error writing the file

        .. versionadded:: 1.6.1
        """
        with open(filename, "w") as f:
            for e in self._entries:
                line = e.to_line()
                if line:
                    f.write(line)

    def lookup(self, hostname):
        """
        Find a hostkey entry for a given hostname or IP.  If no entry is
        found, ``None`` is returned.  Otherwise a dictionary of keytype to
        key is returned.

        :param str hostname: the hostname (or IP) to lookup
        :return: dict of `str` -> `.PKey` keys associated with this host
            (or ``None``)
        """

        # Lazily-built dict-like view over the entries matching `hostname`;
        # mutating it writes through to the parent HostKeys object.
        class SubDict(MutableMapping):
            def __init__(self, hostname, entries, hostkeys):
                self._hostname = hostname
                self._entries = entries
                self._hostkeys = hostkeys

            def __iter__(self):
                for k in self.keys():
                    yield k

            def __len__(self):
                return len(self.keys())

            def __delitem__(self, key):
                for e in list(self._entries):
                    if e.key.get_name() == key:
                        self._entries.remove(e)
                        break
                else:
                    raise KeyError(key)

            def __getitem__(self, key):
                for e in self._entries:
                    if e.key.get_name() == key:
                        return e.key
                raise KeyError(key)

            def __setitem__(self, key, val):
                for e in self._entries:
                    if e.key is None:
                        continue
                    if e.key.get_name() == key:
                        # replace
                        e.key = val
                        break
                else:
                    # add a new one, visible both here and in the parent
                    e = HostKeyEntry([hostname], val)
                    self._entries.append(e)
                    self._hostkeys._entries.append(e)

            def keys(self):
                return [
                    e.key.get_name()
                    for e in self._entries
                    if e.key is not None
                ]

        entries = []
        for e in self._entries:
            if self._hostname_matches(hostname, e):
                entries.append(e)
        if len(entries) == 0:
            return None
        return SubDict(hostname, entries, self)

    def _hostname_matches(self, hostname, entry):
        """
        Tests whether ``hostname`` string matches given SubDict ``entry``.
        Handles both plain hostnames and hashed (``|1|``) known_hosts
        entries; the hash comparison is done in constant time.

        :returns bool:
        """
        for h in entry.hostnames:
            if (
                h == hostname
                or h.startswith("|1|")
                and not hostname.startswith("|1|")
                and constant_time_bytes_eq(self.hash_host(hostname, h), h)
            ):
                return True
        return False

    def check(self, hostname, key):
        """
        Return True if the given key is associated with the given hostname
        in this dictionary.

        :param str hostname: hostname (or IP) of the SSH server
        :param .PKey key: the key to check
        :return:
            ``True`` if the key is associated with the hostname; else
            ``False``
        """
        k = self.lookup(hostname)
        if k is None:
            return False
        host_key = k.get(key.get_name(), None)
        if host_key is None:
            return False
        return host_key.asbytes() == key.asbytes()

    def clear(self):
        """
        Remove all host keys from the dictionary.
        """
        self._entries = []

    def __iter__(self):
        for k in self.keys():
            yield k

    def __len__(self):
        return len(self.keys())

    def __getitem__(self, key):
        ret = self.lookup(key)
        if ret is None:
            raise KeyError(key)
        return ret

    def __delitem__(self, key):
        index = None
        for i, entry in enumerate(self._entries):
            if self._hostname_matches(key, entry):
                index = i
                break
        if index is None:
            raise KeyError(key)
        self._entries.pop(index)

    def __setitem__(self, hostname, entry):
        # don't use this please.
        if len(entry) == 0:
            self._entries.append(HostKeyEntry([hostname], None))
            return
        for key_type in entry.keys():
            found = False
            for e in self._entries:
                if (hostname in e.hostnames) and e.key.get_name() == key_type:
                    # replace
                    e.key = entry[key_type]
                    found = True
            if not found:
                self._entries.append(HostKeyEntry([hostname], entry[key_type]))

    def keys(self):
        # preserve insertion order while de-duplicating hostnames
        ret = []
        for e in self._entries:
            for h in e.hostnames:
                if h not in ret:
                    ret.append(h)
        return ret

    def values(self):
        ret = []
        for k in self.keys():
            ret.append(self.lookup(k))
        return ret

    @staticmethod
    def hash_host(hostname, salt=None):
        """
        Return a "hashed" form of the hostname, as used by OpenSSH when
        storing hashed hostnames in the known_hosts file.

        :param str hostname: the hostname to hash
        :param str salt: optional salt to use when hashing
            (must be 20 bytes long)
        :return: the hashed hostname as a `str`
        """
        if salt is None:
            salt = os.urandom(sha1().digest_size)
        else:
            if salt.startswith("|1|"):
                # a full "|1|salt|hash" entry was passed; extract the salt
                salt = salt.split("|")[2]
            salt = decodebytes(b(salt))
        assert len(salt) == sha1().digest_size
        hmac = HMAC(salt, b(hostname), sha1).digest()
        hostkey = "|1|{}|{}".format(u(encodebytes(salt)), u(encodebytes(hmac)))
        return hostkey.replace("\n", "")
+
+
class InvalidHostKey(Exception):
    """Raised when a known_hosts line carries an undecodable key blob."""

    def __init__(self, line, exc):
        self.line = line
        self.exc = exc
        # keep args populated so the exception prints/pickles sensibly
        self.args = (line, exc)
+
+
class HostKeyEntry:
    """
    Representation of a line in an OpenSSH-style "known hosts" file.
    """

    def __init__(self, hostnames=None, key=None):
        # An entry is only usable (e.g. serializable via to_line) when both
        # the hostname list and the key are present.
        self.valid = (hostnames is not None) and (key is not None)
        self.hostnames = hostnames
        self.key = key

    @classmethod
    def from_line(cls, line, lineno=None):
        """
        Parse one known_hosts line into a `HostKeyEntry`: hostnames, key
        type, and key data, separated by a single space or tab.

        Lines are expected to not have leading or trailing whitespace.
        We don't bother to check for comments or empty lines.  All of
        that should be taken care of before sending the line to us.

        :param str line: a line from an OpenSSH known_hosts file
        :param int lineno: optional line number, used only in log messages
        :returns: a `HostKeyEntry`, or ``None`` if the line is malformed or
            the key type is unsupported
        :raises InvalidHostKey: if the base64 key blob cannot be decoded
        """
        log = get_logger("paramiko.hostkeys")
        fields = re.split(" |\t", line)
        if len(fields) < 3:
            # Bad number of fields
            msg = "Not enough fields found in known_hosts in line {} ({!r})"
            log.info(msg.format(lineno, line))
            return None
        fields = fields[:3]

        names, key_type, key = fields
        names = names.split(",")

        # Decide what kind of key we're looking at and create an object
        # to hold it accordingly.
        try:
            # TODO: this grew organically and doesn't seem /wrong/ per se (file
            # read -> unicode str -> bytes for base64 decode -> decoded bytes);
            # but in Python 3 forever land, can we simply use
            # `base64.b64decode(str-from-file)` here?
            key_bytes = decodebytes(b(key))
        except binascii.Error as e:
            raise InvalidHostKey(line, e)

        try:
            return cls(names, PKey.from_type_string(key_type, key_bytes))
        except UnknownKeyType:
            # TODO 4.0: consider changing HostKeys API so this just raises
            # naturally and the exception is muted higher up in the stack?
            log.info("Unable to handle key of type {}".format(key_type))
            return None

    def to_line(self):
        """
        Returns a string in OpenSSH known_hosts file format, or None if
        the object is not in a valid state.  A trailing newline is
        included.
        """
        if self.valid:
            return "{} {} {}\n".format(
                ",".join(self.hostnames),
                self.key.get_name(),
                self.key.get_base64(),
            )
        return None

    def __repr__(self):
        # BUG FIX: the format template had been stripped down to "" (the
        # angle-bracketed text was lost), so repr() returned an empty
        # string.  Restore the standard "<HostKeyEntry ...>" form.
        return "<HostKeyEntry {!r} -> {!r}>".format(self.hostnames, self.key)
diff --git a/lib/paramiko/kex_curve25519.py b/lib/paramiko/kex_curve25519.py
new file mode 100644
index 0000000..20c23e4
--- /dev/null
+++ b/lib/paramiko/kex_curve25519.py
@@ -0,0 +1,131 @@
+import binascii
+import hashlib
+
+from cryptography.exceptions import UnsupportedAlgorithm
+from cryptography.hazmat.primitives import constant_time, serialization
+from cryptography.hazmat.primitives.asymmetric.x25519 import (
+ X25519PrivateKey,
+ X25519PublicKey,
+)
+
+from paramiko.message import Message
+from paramiko.common import byte_chr
+from paramiko.ssh_exception import SSHException
+
+
# SSH message numbers 30/31 for the ECDH init/reply packets, plus their
# single-byte packed forms.
_MSG_KEXECDH_INIT, _MSG_KEXECDH_REPLY = range(30, 32)
c_MSG_KEXECDH_INIT = byte_chr(_MSG_KEXECDH_INIT)
c_MSG_KEXECDH_REPLY = byte_chr(_MSG_KEXECDH_REPLY)
+
+
class KexCurve25519:
    """ECDH key exchange using Curve25519 (X25519) with SHA-256."""

    hash_algo = hashlib.sha256

    def __init__(self, transport):
        self.transport = transport
        # ephemeral X25519 private key, created in start_kex()
        self.key = None

    @classmethod
    def is_available(cls):
        # Probe the cryptography backend: X25519 support may be absent.
        try:
            X25519PrivateKey.generate()
        except UnsupportedAlgorithm:
            return False
        return True

    def _perform_exchange(self, peer_key):
        # Derive the shared secret; an all-zero result indicates the peer's
        # public value has wrong order and must be rejected (the comparison
        # is done in constant time).
        shared = self.key.exchange(peer_key)
        if constant_time.bytes_eq(shared, b"\x00" * 32):
            raise SSHException(
                "peer's curve25519 public value has wrong order"
            )
        return shared

    def start_kex(self):
        self.key = X25519PrivateKey.generate()
        if self.transport.server_mode:
            self.transport._expect_packet(_MSG_KEXECDH_INIT)
            return

        msg = Message()
        msg.add_byte(c_MSG_KEXECDH_INIT)
        msg.add_string(
            self.key.public_key().public_bytes(
                serialization.Encoding.Raw, serialization.PublicFormat.Raw
            )
        )
        self.transport._send_message(msg)
        self.transport._expect_packet(_MSG_KEXECDH_REPLY)

    def parse_next(self, ptype, m):
        if self.transport.server_mode and ptype == _MSG_KEXECDH_INIT:
            return self._parse_kexecdh_init(m)
        if not self.transport.server_mode and ptype == _MSG_KEXECDH_REPLY:
            return self._parse_kexecdh_reply(m)
        raise SSHException(
            "KexCurve25519 asked to handle packet type {:d}".format(ptype)
        )

    def _parse_kexecdh_init(self, m):
        # Server side: consume the client's ephemeral key, derive K, sign
        # the exchange hash, and reply.
        client_key_bytes = m.get_string()
        client_key = X25519PublicKey.from_public_bytes(client_key_bytes)
        K = self._perform_exchange(client_key)
        K = int(binascii.hexlify(K), 16)
        # exchange hash over versions, kex-inits, host key and both points
        hm = Message()
        hm.add(
            self.transport.remote_version,
            self.transport.local_version,
            self.transport.remote_kex_init,
            self.transport.local_kex_init,
        )
        server_key_bytes = self.transport.get_server_key().asbytes()
        exchange_key_bytes = self.key.public_key().public_bytes(
            serialization.Encoding.Raw, serialization.PublicFormat.Raw
        )
        hm.add_string(server_key_bytes)
        hm.add_string(client_key_bytes)
        hm.add_string(exchange_key_bytes)
        hm.add_mpint(K)
        H = self.hash_algo(hm.asbytes()).digest()
        self.transport._set_K_H(K, H)
        sig = self.transport.get_server_key().sign_ssh_data(
            H, self.transport.host_key_type
        )
        # construct reply
        reply = Message()
        reply.add_byte(c_MSG_KEXECDH_REPLY)
        reply.add_string(server_key_bytes)
        reply.add_string(exchange_key_bytes)
        reply.add_string(sig)
        self.transport._send_message(reply)
        self.transport._activate_outbound()

    def _parse_kexecdh_reply(self, m):
        # Client side: host key, server ephemeral key, and signature.
        host_key_bytes = m.get_string()
        server_key_bytes = m.get_string()
        sig = m.get_binary()

        server_key = X25519PublicKey.from_public_bytes(server_key_bytes)

        K = self._perform_exchange(server_key)
        K = int(binascii.hexlify(K), 16)
        # rebuild the exchange hash, then verify the server's signature
        hm = Message()
        hm.add(
            self.transport.local_version,
            self.transport.remote_version,
            self.transport.local_kex_init,
            self.transport.remote_kex_init,
        )
        hm.add_string(host_key_bytes)
        hm.add_string(
            self.key.public_key().public_bytes(
                serialization.Encoding.Raw, serialization.PublicFormat.Raw
            )
        )
        hm.add_string(server_key_bytes)
        hm.add_mpint(K)
        self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
        self.transport._verify_key(host_key_bytes, sig)
        self.transport._activate_outbound()
diff --git a/lib/paramiko/kex_ecdh_nist.py b/lib/paramiko/kex_ecdh_nist.py
new file mode 100644
index 0000000..41fab46
--- /dev/null
+++ b/lib/paramiko/kex_ecdh_nist.py
@@ -0,0 +1,151 @@
+"""
+Ephemeral Elliptic Curve Diffie-Hellman (ECDH) key exchange
+RFC 5656, Section 4
+"""
+
+from hashlib import sha256, sha384, sha512
+from paramiko.common import byte_chr
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives import serialization
+from binascii import hexlify
+
# SSH message numbers 30/31 for the ECDH init/reply packets, plus their
# single-byte packed forms.
_MSG_KEXECDH_INIT, _MSG_KEXECDH_REPLY = range(30, 32)
c_MSG_KEXECDH_INIT = byte_chr(_MSG_KEXECDH_INIT)
c_MSG_KEXECDH_REPLY = byte_chr(_MSG_KEXECDH_REPLY)
+
+
class KexNistp256:
    """Ephemeral ECDH key exchange on NIST P-256 (RFC 5656, section 4)."""

    name = "ecdh-sha2-nistp256"
    hash_algo = sha256
    curve = ec.SECP256R1()

    def __init__(self, transport):
        self.transport = transport
        # P: our ephemeral private key; Q_C / Q_S: client / server publics
        self.P = 0
        self.Q_C = None
        self.Q_S = None

    def start_kex(self):
        self._generate_key_pair()
        if self.transport.server_mode:
            self.transport._expect_packet(_MSG_KEXECDH_INIT)
            return
        msg = Message()
        msg.add_byte(c_MSG_KEXECDH_INIT)
        # SEC1 V2.0 2.3.3: uncompressed point encoding
        msg.add_string(
            self.Q_C.public_bytes(
                serialization.Encoding.X962,
                serialization.PublicFormat.UncompressedPoint,
            )
        )
        self.transport._send_message(msg)
        self.transport._expect_packet(_MSG_KEXECDH_REPLY)

    def parse_next(self, ptype, m):
        if self.transport.server_mode and ptype == _MSG_KEXECDH_INIT:
            return self._parse_kexecdh_init(m)
        if not self.transport.server_mode and ptype == _MSG_KEXECDH_REPLY:
            return self._parse_kexecdh_reply(m)
        raise SSHException(
            "KexECDH asked to handle packet type {:d}".format(ptype)
        )

    def _generate_key_pair(self):
        self.P = ec.generate_private_key(self.curve, default_backend())
        if self.transport.server_mode:
            self.Q_S = self.P.public_key()
        else:
            self.Q_C = self.P.public_key()

    def _parse_kexecdh_init(self, m):
        # Server side: read the client's point, derive K, sign, reply.
        Q_C_bytes = m.get_string()
        self.Q_C = ec.EllipticCurvePublicKey.from_encoded_point(
            self.curve, Q_C_bytes
        )
        K_S = self.transport.get_server_key().asbytes()
        K = self.P.exchange(ec.ECDH(), self.Q_C)
        K = int(hexlify(K), 16)
        # compute exchange hash
        hm = Message()
        hm.add(
            self.transport.remote_version,
            self.transport.local_version,
            self.transport.remote_kex_init,
            self.transport.local_kex_init,
        )
        hm.add_string(K_S)
        hm.add_string(Q_C_bytes)
        # SEC1 V2.0 2.3.3: uncompressed point encoding
        hm.add_string(
            self.Q_S.public_bytes(
                serialization.Encoding.X962,
                serialization.PublicFormat.UncompressedPoint,
            )
        )
        hm.add_mpint(int(K))
        H = self.hash_algo(hm.asbytes()).digest()
        self.transport._set_K_H(K, H)
        sig = self.transport.get_server_key().sign_ssh_data(
            H, self.transport.host_key_type
        )
        # construct reply
        reply = Message()
        reply.add_byte(c_MSG_KEXECDH_REPLY)
        reply.add_string(K_S)
        reply.add_string(
            self.Q_S.public_bytes(
                serialization.Encoding.X962,
                serialization.PublicFormat.UncompressedPoint,
            )
        )
        reply.add_string(sig)
        self.transport._send_message(reply)
        self.transport._activate_outbound()

    def _parse_kexecdh_reply(self, m):
        # Client side: server host key + point; derive K, verify signature.
        K_S = m.get_string()
        Q_S_bytes = m.get_string()
        self.Q_S = ec.EllipticCurvePublicKey.from_encoded_point(
            self.curve, Q_S_bytes
        )
        sig = m.get_binary()
        K = self.P.exchange(ec.ECDH(), self.Q_S)
        K = int(hexlify(K), 16)
        # compute exchange hash and verify signature
        hm = Message()
        hm.add(
            self.transport.local_version,
            self.transport.remote_version,
            self.transport.local_kex_init,
            self.transport.remote_kex_init,
        )
        hm.add_string(K_S)
        # SEC1 V2.0 2.3.3: uncompressed point encoding
        hm.add_string(
            self.Q_C.public_bytes(
                serialization.Encoding.X962,
                serialization.PublicFormat.UncompressedPoint,
            )
        )
        hm.add_string(Q_S_bytes)
        hm.add_mpint(K)
        self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
        self.transport._verify_key(K_S, sig)
        self.transport._activate_outbound()
+
+
class KexNistp384(KexNistp256):
    """Same exchange as `KexNistp256`, over NIST P-384 with SHA-384."""

    name = "ecdh-sha2-nistp384"
    hash_algo = sha384
    curve = ec.SECP384R1()
+
+
class KexNistp521(KexNistp256):
    """Same exchange as `KexNistp256`, over NIST P-521 with SHA-512."""

    name = "ecdh-sha2-nistp521"
    hash_algo = sha512
    curve = ec.SECP521R1()
diff --git a/lib/paramiko/kex_gex.py b/lib/paramiko/kex_gex.py
new file mode 100644
index 0000000..baa0803
--- /dev/null
+++ b/lib/paramiko/kex_gex.py
@@ -0,0 +1,288 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Variant on `KexGroup1 <paramiko.kex_group1.KexGroup1>` where the prime "p" and
+generator "g" are provided by the server. A bit more work is required on the
+client side, and a **lot** more on the server side.
+"""
+
+import os
+from hashlib import sha1, sha256
+
+from paramiko import util
+from paramiko.common import DEBUG, byte_chr, byte_ord, byte_mask
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
# Message numbers 30-34 for diffie-hellman group exchange, plus their
# single-byte packed forms.
(
    _MSG_KEXDH_GEX_REQUEST_OLD,
    _MSG_KEXDH_GEX_GROUP,
    _MSG_KEXDH_GEX_INIT,
    _MSG_KEXDH_GEX_REPLY,
    _MSG_KEXDH_GEX_REQUEST,
) = range(30, 35)

(
    c_MSG_KEXDH_GEX_REQUEST_OLD,
    c_MSG_KEXDH_GEX_GROUP,
    c_MSG_KEXDH_GEX_INIT,
    c_MSG_KEXDH_GEX_REPLY,
    c_MSG_KEXDH_GEX_REQUEST,
) = [byte_chr(c) for c in range(30, 35)]
+
+
class KexGex:
    """Diffie-Hellman group exchange (SHA-1): the modulus "p" and generator
    "g" are chosen by the server within a bit range requested by the
    client, rather than being fixed."""

    name = "diffie-hellman-group-exchange-sha1"
    min_bits = 1024
    max_bits = 8192
    preferred_bits = 2048
    hash_algo = sha1

    def __init__(self, transport):
        self.transport = transport
        self.p = None
        self.q = None
        self.g = None
        self.x = None
        self.e = None
        self.f = None
        # True when the peer used the legacy single-size REQUEST_OLD form;
        # this changes which fields go into the exchange hash.
        self.old_style = False

    def start_kex(self, _test_old_style=False):
        if self.transport.server_mode:
            self.transport._expect_packet(
                _MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD
            )
            return
        # Ask for a modulus between min_bits and max_bits, preferring
        # preferred_bits; per the spec the minimum shouldn't be pulled
        # above 1024.
        msg = Message()
        if _test_old_style:
            # only used for unit tests: we shouldn't ever send this
            msg.add_byte(c_MSG_KEXDH_GEX_REQUEST_OLD)
            msg.add_int(self.preferred_bits)
            self.old_style = True
        else:
            msg.add_byte(c_MSG_KEXDH_GEX_REQUEST)
            msg.add_int(self.min_bits)
            msg.add_int(self.preferred_bits)
            msg.add_int(self.max_bits)
        self.transport._send_message(msg)
        self.transport._expect_packet(_MSG_KEXDH_GEX_GROUP)

    def parse_next(self, ptype, m):
        # a dispatch table keeps the packet handling flat
        handlers = {
            _MSG_KEXDH_GEX_REQUEST: self._parse_kexdh_gex_request,
            _MSG_KEXDH_GEX_GROUP: self._parse_kexdh_gex_group,
            _MSG_KEXDH_GEX_INIT: self._parse_kexdh_gex_init,
            _MSG_KEXDH_GEX_REPLY: self._parse_kexdh_gex_reply,
            _MSG_KEXDH_GEX_REQUEST_OLD: self._parse_kexdh_gex_request_old,
        }
        handler = handlers.get(ptype)
        if handler is not None:
            return handler(m)
        msg = "KexGex {} asked to handle packet type {:d}"
        raise SSHException(msg.format(self.name, ptype))

    # ...internals...

    def _generate_x(self):
        # Draw a random x with 1 < x < (p-1)/2.
        q = (self.p - 1) // 2
        q_bytes = util.deflate_long(q, 0)
        high = byte_ord(q_bytes[0])
        n_bytes = len(q_bytes)
        mask = 0xFF
        # shrink the mask until the candidate's top byte can't exceed q's
        while not (high & 0x80):
            high <<= 1
            mask >>= 1
        while True:
            candidate = os.urandom(n_bytes)
            candidate = byte_mask(candidate[0], mask) + candidate[1:]
            x = util.inflate_long(candidate, 1)
            if 1 < x < q:
                break
        self.x = x

    def _parse_kexdh_gex_request(self, m):
        minbits = m.get_int()
        preferredbits = m.get_int()
        maxbits = m.get_int()
        # clamp the client's preferred size into our own supported range
        preferredbits = min(preferredbits, self.max_bits)
        preferredbits = max(preferredbits, self.min_bits)
        # repair inconsistent min/max rather than hanging up; no harm in
        # giving the client the benefit of the doubt
        minbits = min(minbits, preferredbits)
        maxbits = max(maxbits, preferredbits)
        # remember the negotiated range: it goes into the exchange hash
        self.min_bits = minbits
        self.preferred_bits = preferredbits
        self.max_bits = maxbits
        # pick a prime from the modulus pack
        pack = self.transport._get_modulus_pack()
        if pack is None:
            raise SSHException("Can't do server-side gex with no modulus pack")
        self.transport._log(
            DEBUG,
            "Picking p ({} <= {} <= {} bits)".format(
                minbits, preferredbits, maxbits
            ),
        )
        self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
        reply = Message()
        reply.add_byte(c_MSG_KEXDH_GEX_GROUP)
        reply.add_mpint(self.p)
        reply.add_mpint(self.g)
        self.transport._send_message(reply)
        self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)

    def _parse_kexdh_gex_request_old(self, m):
        # legacy form used by older clients like putty: just one preferred
        # size, no min/max
        self.preferred_bits = m.get_int()
        # clamp the preference into our own supported range
        self.preferred_bits = min(self.preferred_bits, self.max_bits)
        self.preferred_bits = max(self.preferred_bits, self.min_bits)
        # pick a prime from the modulus pack
        pack = self.transport._get_modulus_pack()
        if pack is None:
            raise SSHException("Can't do server-side gex with no modulus pack")
        self.transport._log(
            DEBUG, "Picking p (~ {} bits)".format(self.preferred_bits)
        )
        self.g, self.p = pack.get_modulus(
            self.min_bits, self.preferred_bits, self.max_bits
        )
        reply = Message()
        reply.add_byte(c_MSG_KEXDH_GEX_GROUP)
        reply.add_mpint(self.p)
        reply.add_mpint(self.g)
        self.transport._send_message(reply)
        self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
        self.old_style = True

    def _parse_kexdh_gex_group(self, m):
        self.p = m.get_mpint()
        self.g = m.get_mpint()
        # reject moduli outside the 1024..8192-bit window
        bitlen = util.bit_length(self.p)
        if (bitlen < 1024) or (bitlen > 8192):
            raise SSHException(
                "Server-generated gex p (don't ask) is out of range "
                "({} bits)".format(bitlen)
            )
        self.transport._log(DEBUG, "Got server p ({} bits)".format(bitlen))
        self._generate_x()
        # e = g^x mod p
        self.e = pow(self.g, self.x, self.p)
        reply = Message()
        reply.add_byte(c_MSG_KEXDH_GEX_INIT)
        reply.add_mpint(self.e)
        self.transport._send_message(reply)
        self.transport._expect_packet(_MSG_KEXDH_GEX_REPLY)

    def _parse_kexdh_gex_init(self, m):
        self.e = m.get_mpint()
        if (self.e < 1) or (self.e > self.p - 1):
            raise SSHException('Client kex "e" is out of range')
        self._generate_x()
        self.f = pow(self.g, self.x, self.p)
        K = pow(self.e, self.x, self.p)
        key = self.transport.get_server_key().asbytes()
        # build the hash H of
        # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)  # noqa
        hm = Message()
        hm.add(
            self.transport.remote_version,
            self.transport.local_version,
            self.transport.remote_kex_init,
            self.transport.local_kex_init,
            key,
        )
        # the legacy request carried no min/max, so they stay out of the hash
        if not self.old_style:
            hm.add_int(self.min_bits)
        hm.add_int(self.preferred_bits)
        if not self.old_style:
            hm.add_int(self.max_bits)
        hm.add_mpint(self.p)
        hm.add_mpint(self.g)
        hm.add_mpint(self.e)
        hm.add_mpint(self.f)
        hm.add_mpint(K)
        H = self.hash_algo(hm.asbytes()).digest()
        self.transport._set_K_H(K, H)
        # sign it
        sig = self.transport.get_server_key().sign_ssh_data(
            H, self.transport.host_key_type
        )
        # send reply
        reply = Message()
        reply.add_byte(c_MSG_KEXDH_GEX_REPLY)
        reply.add_string(key)
        reply.add_mpint(self.f)
        reply.add_string(sig)
        self.transport._send_message(reply)
        self.transport._activate_outbound()

    def _parse_kexdh_gex_reply(self, m):
        host_key = m.get_string()
        self.f = m.get_mpint()
        sig = m.get_string()
        if (self.f < 1) or (self.f > self.p - 1):
            raise SSHException('Server kex "f" is out of range')
        K = pow(self.f, self.x, self.p)
        # build the hash H of the same fields, from the client perspective:
        # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)  # noqa
        hm = Message()
        hm.add(
            self.transport.local_version,
            self.transport.remote_version,
            self.transport.local_kex_init,
            self.transport.remote_kex_init,
            host_key,
        )
        if not self.old_style:
            hm.add_int(self.min_bits)
        hm.add_int(self.preferred_bits)
        if not self.old_style:
            hm.add_int(self.max_bits)
        hm.add_mpint(self.p)
        hm.add_mpint(self.g)
        hm.add_mpint(self.e)
        hm.add_mpint(self.f)
        hm.add_mpint(K)
        self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
        self.transport._verify_key(host_key, sig)
        self.transport._activate_outbound()
+
+
class KexGexSHA256(KexGex):
    """Same group exchange as `KexGex`, hashed with SHA-256 instead."""

    name = "diffie-hellman-group-exchange-sha256"
    hash_algo = sha256
diff --git a/lib/paramiko/kex_group1.py b/lib/paramiko/kex_group1.py
new file mode 100644
index 0000000..f074256
--- /dev/null
+++ b/lib/paramiko/kex_group1.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
+1024 bit key halves, using a known "p" prime and "g" generator.
+"""
+
+import os
+from hashlib import sha1
+
+from paramiko import util
+from paramiko.common import max_byte, zero_byte, byte_chr, byte_mask
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
+# SSH2 message numbers for this key exchange: plain ints for dispatch in
+# parse_next(), c_* single-byte forms for writing into outgoing packets.
+_MSG_KEXDH_INIT, _MSG_KEXDH_REPLY = range(30, 32)
+c_MSG_KEXDH_INIT, c_MSG_KEXDH_REPLY = [byte_chr(c) for c in range(30, 32)]
+
+# 8-byte sentinel prefixes used by _generate_x() to reject candidate "x"
+# values whose leading 63 bits are all ones or all zeros.
+b7fffffffffffffff = byte_chr(0x7F) + max_byte * 7
+b0000000000000000 = zero_byte * 8
+
+
+class KexGroup1:
+    """
+    Diffie-Hellman key exchange over the fixed 1024-bit "group 1" modulus,
+    hashed with SHA-1 ("diffie-hellman-group1-sha1").  Subclasses override
+    ``P``/``G``/``name``/``hash_algo`` to implement the larger fixed groups.
+    """
+
+    # draft-ietf-secsh-transport-09.txt, page 17
+    P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
+    G = 2
+
+    name = "diffie-hellman-group1-sha1"
+    hash_algo = sha1
+
+    def __init__(self, transport):
+        # transport supplies version strings, kex-init payloads, and the
+        # packet send/expect machinery used throughout the handshake
+        self.transport = transport
+        self.x = 0
+        self.e = 0
+        self.f = 0
+
+    def start_kex(self):
+        """
+        Begin the exchange: generate our secret "x"; servers wait for the
+        client's INIT, clients send e = g^x mod p and wait for the REPLY.
+        """
+        self._generate_x()
+        if self.transport.server_mode:
+            # compute f = g^x mod p, but don't send it yet
+            self.f = pow(self.G, self.x, self.P)
+            self.transport._expect_packet(_MSG_KEXDH_INIT)
+            return
+        # compute e = g^x mod p (where g=2), and send it
+        self.e = pow(self.G, self.x, self.P)
+        m = Message()
+        m.add_byte(c_MSG_KEXDH_INIT)
+        m.add_mpint(self.e)
+        self.transport._send_message(m)
+        self.transport._expect_packet(_MSG_KEXDH_REPLY)
+
+    def parse_next(self, ptype, m):
+        """
+        Dispatch an incoming kex packet to the handler matching our role;
+        anything unexpected is a protocol error.
+        """
+        if self.transport.server_mode and (ptype == _MSG_KEXDH_INIT):
+            return self._parse_kexdh_init(m)
+        elif not self.transport.server_mode and (ptype == _MSG_KEXDH_REPLY):
+            return self._parse_kexdh_reply(m)
+        msg = "KexGroup1 asked to handle packet type {:d}"
+        raise SSHException(msg.format(ptype))
+
+    # ...internals...
+
+    def _generate_x(self):
+        # generate an "x" (1 < x < q), where q is (p-1)/2.
+        # p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
+        # therefore q can be approximated as a 2^1023. we drop the subset of
+        # potential x where the first 63 bits are 1, because some of those
+        # will be larger than q (but this is a tiny tiny subset of
+        # potential x).
+        while 1:
+            x_bytes = os.urandom(128)
+            # clear the top bit so the candidate is below 2^1023
+            x_bytes = byte_mask(x_bytes[0], 0x7F) + x_bytes[1:]
+            if (
+                x_bytes[:8] != b7fffffffffffffff
+                and x_bytes[:8] != b0000000000000000
+            ):
+                break
+        self.x = util.inflate_long(x_bytes)
+
+    def _parse_kexdh_reply(self, m):
+        # client mode: read host key, server's "f", and signature; derive K
+        # and the exchange hash H, then verify the host key signature.
+        host_key = m.get_string()
+        self.f = m.get_mpint()
+        if (self.f < 1) or (self.f > self.P - 1):
+            raise SSHException('Server kex "f" is out of range')
+        sig = m.get_binary()
+        K = pow(self.f, self.x, self.P)
+        # okay, build up the hash H of
+        # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+        hm = Message()
+        hm.add(
+            self.transport.local_version,
+            self.transport.remote_version,
+            self.transport.local_kex_init,
+            self.transport.remote_kex_init,
+        )
+        hm.add_string(host_key)
+        hm.add_mpint(self.e)
+        hm.add_mpint(self.f)
+        hm.add_mpint(K)
+        self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
+        self.transport._verify_key(host_key, sig)
+        self.transport._activate_outbound()
+
+    def _parse_kexdh_init(self, m):
+        # server mode: read the client's "e", derive K and H, sign H with
+        # our host key, and send the REPLY.
+        self.e = m.get_mpint()
+        if (self.e < 1) or (self.e > self.P - 1):
+            raise SSHException('Client kex "e" is out of range')
+        K = pow(self.e, self.x, self.P)
+        key = self.transport.get_server_key().asbytes()
+        # okay, build up the hash H of
+        # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+        hm = Message()
+        hm.add(
+            self.transport.remote_version,
+            self.transport.local_version,
+            self.transport.remote_kex_init,
+            self.transport.local_kex_init,
+        )
+        hm.add_string(key)
+        hm.add_mpint(self.e)
+        hm.add_mpint(self.f)
+        hm.add_mpint(K)
+        H = self.hash_algo(hm.asbytes()).digest()
+        self.transport._set_K_H(K, H)
+        # sign it
+        sig = self.transport.get_server_key().sign_ssh_data(
+            H, self.transport.host_key_type
+        )
+        # send reply
+        m = Message()
+        m.add_byte(c_MSG_KEXDH_REPLY)
+        m.add_string(key)
+        m.add_mpint(self.f)
+        m.add_string(sig)
+        self.transport._send_message(m)
+        self.transport._activate_outbound()
diff --git a/lib/paramiko/kex_group14.py b/lib/paramiko/kex_group14.py
new file mode 100644
index 0000000..8dee551
--- /dev/null
+++ b/lib/paramiko/kex_group14.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2013 Torsten Landschoff
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
+2048 bit key halves, using a known "p" prime and "g" generator.
+"""
+
+from paramiko.kex_group1 import KexGroup1
+from hashlib import sha1, sha256
+
+
+class KexGroup14(KexGroup1):
+    """
+    Same handshake as `KexGroup1`, but over the 2048-bit MODP group 14
+    prime.  Only the group parameters and algorithm name differ.
+    """
+
+    # http://tools.ietf.org/html/rfc3526#section-3
+    P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
+    G = 2
+
+    name = "diffie-hellman-group14-sha1"
+    hash_algo = sha1
+
+
+class KexGroup14SHA256(KexGroup14):
+    # Group 14 exchange hashed with SHA-256 instead of SHA-1.
+    name = "diffie-hellman-group14-sha256"
+    hash_algo = sha256
diff --git a/lib/paramiko/kex_group16.py b/lib/paramiko/kex_group16.py
new file mode 100644
index 0000000..c675f87
--- /dev/null
+++ b/lib/paramiko/kex_group16.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2019 Edgar Sousa
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Standard SSH key exchange ("kex" if you wanna sound cool). Diffie-Hellman of
+4096 bit key halves, using a known "p" prime and "g" generator.
+"""
+
+from paramiko.kex_group1 import KexGroup1
+from hashlib import sha512
+
+
+class KexGroup16SHA512(KexGroup1):
+ name = "diffie-hellman-group16-sha512"
+ # http://tools.ietf.org/html/rfc3526#section-5
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF # noqa
+ G = 2
+
+ name = "diffie-hellman-group16-sha512"
+ hash_algo = sha512
diff --git a/lib/paramiko/kex_gss.py b/lib/paramiko/kex_gss.py
new file mode 100644
index 0000000..2a5f29e
--- /dev/null
+++ b/lib/paramiko/kex_gss.py
@@ -0,0 +1,686 @@
+# Copyright (C) 2003-2007 Robey Pointer
+# Copyright (C) 2013-2014 science + computing ag
+# Author: Sebastian Deiss
+#
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+"""
+This module provides GSS-API / SSPI Key Exchange as defined in :rfc:`4462`.
+
+.. note:: Credential delegation is not supported in server mode.
+
+.. note::
+ `RFC 4462 Section 2.2
+ `_ says we are not
+ required to implement GSS-API error messages. Thus, in many methods within
+ this module, if an error occurs an exception will be thrown and the
+ connection will be terminated.
+
+.. seealso:: :doc:`/api/ssh_gss`
+
+.. versionadded:: 1.15
+"""
+
+import os
+from hashlib import sha1
+
+from paramiko.common import (
+ DEBUG,
+ max_byte,
+ zero_byte,
+ byte_chr,
+ byte_mask,
+ byte_ord,
+)
+from paramiko import util
+from paramiko.message import Message
+from paramiko.ssh_exception import SSHException
+
+
+# SSH2 message numbers for RFC 4462 GSS-API key exchange: plain ints for
+# dispatch in parse_next(), c_* single-byte forms for outgoing packets.
+(
+    MSG_KEXGSS_INIT,
+    MSG_KEXGSS_CONTINUE,
+    MSG_KEXGSS_COMPLETE,
+    MSG_KEXGSS_HOSTKEY,
+    MSG_KEXGSS_ERROR,
+) = range(30, 35)
+# group-exchange variants use a separate message-number range
+(MSG_KEXGSS_GROUPREQ, MSG_KEXGSS_GROUP) = range(40, 42)
+(
+    c_MSG_KEXGSS_INIT,
+    c_MSG_KEXGSS_CONTINUE,
+    c_MSG_KEXGSS_COMPLETE,
+    c_MSG_KEXGSS_HOSTKEY,
+    c_MSG_KEXGSS_ERROR,
+) = [byte_chr(c) for c in range(30, 35)]
+(c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROUP) = [
+    byte_chr(c) for c in range(40, 42)
+]
+
+
+class KexGSSGroup1:
+ """
+ GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange as defined in `RFC
+ 4462 Section 2 `_
+ """
+
+ # draft-ietf-secsh-transport-09.txt, page 17
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
+ G = 2
+ b7fffffffffffffff = byte_chr(0x7F) + max_byte * 7 # noqa
+ b0000000000000000 = zero_byte * 8 # noqa
+ NAME = "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g=="
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.kexgss = self.transport.kexgss_ctxt
+ self.gss_host = None
+ self.x = 0
+ self.e = 0
+ self.f = 0
+
+ def start_kex(self):
+ """
+ Start the GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange.
+ """
+ self._generate_x()
+ if self.transport.server_mode:
+ # compute f = g^x mod p, but don't send it yet
+ self.f = pow(self.G, self.x, self.P)
+ self.transport._expect_packet(MSG_KEXGSS_INIT)
+ return
+ # compute e = g^x mod p (where g=2), and send it
+ self.e = pow(self.G, self.x, self.P)
+ # Initialize GSS-API Key Exchange
+ self.gss_host = self.transport.gss_host
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_INIT)
+ m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_HOSTKEY,
+ MSG_KEXGSS_CONTINUE,
+ MSG_KEXGSS_COMPLETE,
+ MSG_KEXGSS_ERROR,
+ )
+
+ def parse_next(self, ptype, m):
+ """
+ Parse the next packet.
+
+ :param ptype: The (string) type of the incoming packet
+ :param `.Message` m: The packet content
+ """
+ if self.transport.server_mode and (ptype == MSG_KEXGSS_INIT):
+ return self._parse_kexgss_init(m)
+ elif not self.transport.server_mode and (ptype == MSG_KEXGSS_HOSTKEY):
+ return self._parse_kexgss_hostkey(m)
+ elif self.transport.server_mode and (ptype == MSG_KEXGSS_CONTINUE):
+ return self._parse_kexgss_continue(m)
+ elif not self.transport.server_mode and (ptype == MSG_KEXGSS_COMPLETE):
+ return self._parse_kexgss_complete(m)
+ elif ptype == MSG_KEXGSS_ERROR:
+ return self._parse_kexgss_error(m)
+ msg = "GSS KexGroup1 asked to handle packet type {:d}"
+ raise SSHException(msg.format(ptype))
+
+ # ## internals...
+
+ def _generate_x(self):
+ """
+ generate an "x" (1 < x < q), where q is (p-1)/2.
+ p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
+ therefore q can be approximated as a 2^1023. we drop the subset of
+ potential x where the first 63 bits are 1, because some of those will
+ be larger than q (but this is a tiny tiny subset of potential x).
+ """
+ while 1:
+ x_bytes = os.urandom(128)
+ x_bytes = byte_mask(x_bytes[0], 0x7F) + x_bytes[1:]
+ first = x_bytes[:8]
+ if first not in (self.b7fffffffffffffff, self.b0000000000000000):
+ break
+ self.x = util.inflate_long(x_bytes)
+
+ def _parse_kexgss_hostkey(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_HOSTKEY message (client mode).
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_HOSTKEY message
+ """
+ # client mode
+ host_key = m.get_string()
+ self.transport.host_key = host_key
+ sig = m.get_string()
+ self.transport._verify_key(host_key, sig)
+ self.transport._expect_packet(MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE)
+
+ def _parse_kexgss_continue(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_CONTINUE message.
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE
+ message
+ """
+ if not self.transport.server_mode:
+ srv_token = m.get_string()
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ )
+ self.transport.send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+ else:
+ pass
+
+ def _parse_kexgss_complete(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode).
+
+ :param `.Message` m: The content of the
+ SSH2_MSG_KEXGSS_COMPLETE message
+ """
+ # client mode
+ if self.transport.host_key is None:
+ self.transport.host_key = NullHostKey()
+ self.f = m.get_mpint()
+ if (self.f < 1) or (self.f > self.P - 1):
+ raise SSHException('Server kex "f" is out of range')
+ mic_token = m.get_string()
+ # This must be TRUE, if there is a GSS-API token in this message.
+ bool = m.get_boolean()
+ srv_token = None
+ if bool:
+ srv_token = m.get_string()
+ K = pow(self.f, self.x, self.P)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ )
+ hm.add_string(self.transport.host_key.__str__())
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = sha1(str(hm)).digest()
+ self.transport._set_K_H(K, H)
+ if srv_token is not None:
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ self.kexgss.ssh_check_mic(mic_token, H)
+ else:
+ self.kexgss.ssh_check_mic(mic_token, H)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+
+ def _parse_kexgss_init(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_INIT message (server mode).
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_INIT message
+ """
+ # server mode
+ client_token = m.get_string()
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > self.P - 1):
+ raise SSHException('Client kex "e" is out of range')
+ K = pow(self.e, self.x, self.P)
+ self.transport.host_key = NullHostKey()
+ key = self.transport.host_key.__str__()
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ )
+ hm.add_string(key)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = sha1(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ srv_token = self.kexgss.ssh_accept_sec_context(
+ self.gss_host, client_token
+ )
+ m = Message()
+ if self.kexgss._gss_srv_ctxt_status:
+ mic_token = self.kexgss.ssh_get_mic(
+ self.transport.session_id, gss_kex=True
+ )
+ m.add_byte(c_MSG_KEXGSS_COMPLETE)
+ m.add_mpint(self.f)
+ m.add_string(mic_token)
+ if srv_token is not None:
+ m.add_boolean(True)
+ m.add_string(srv_token)
+ else:
+ m.add_boolean(False)
+ self.transport._send_message(m)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+ else:
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(srv_token)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+
+ def _parse_kexgss_error(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_ERROR message (client mode).
+ The server may send a GSS-API error message. if it does, we display
+ the error by throwing an exception (client mode).
+
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_ERROR message
+ :raise SSHException: Contains GSS-API major and minor status as well as
+ the error message and the language tag of the
+ message
+ """
+ maj_status = m.get_int()
+ min_status = m.get_int()
+ err_msg = m.get_string()
+ m.get_string() # we don't care about the language!
+ raise SSHException(
+ """GSS-API Error:
+Major Status: {}
+Minor Status: {}
+Error Message: {}
+""".format(
+ maj_status, min_status, err_msg
+ )
+ )
+
+
+class KexGSSGroup14(KexGSSGroup1):
+    """
+    GSS-API / SSPI Authenticated Diffie-Hellman Group14 Key Exchange as defined
+    in `RFC 4462 Section 2
+    `_
+    """
+
+    # Only the group parameters and mechanism name differ from KexGSSGroup1.
+    P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
+    G = 2
+    NAME = "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g=="
+
+
+class KexGSSGex:
+ """
+ GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange as defined in
+ `RFC 4462 Section 2 `_
+ """
+
+ NAME = "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g=="
+ min_bits = 1024
+ max_bits = 8192
+ preferred_bits = 2048
+
+ def __init__(self, transport):
+ self.transport = transport
+ self.kexgss = self.transport.kexgss_ctxt
+ self.gss_host = None
+ self.p = None
+ self.q = None
+ self.g = None
+ self.x = None
+ self.e = None
+ self.f = None
+ self.old_style = False
+
+ def start_kex(self):
+ """
+ Start the GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange
+ """
+ if self.transport.server_mode:
+ self.transport._expect_packet(MSG_KEXGSS_GROUPREQ)
+ return
+ # request a bit range: we accept (min_bits) to (max_bits), but prefer
+ # (preferred_bits). according to the spec, we shouldn't pull the
+ # minimum up above 1024.
+ self.gss_host = self.transport.gss_host
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_GROUPREQ)
+ m.add_int(self.min_bits)
+ m.add_int(self.preferred_bits)
+ m.add_int(self.max_bits)
+ self.transport._send_message(m)
+ self.transport._expect_packet(MSG_KEXGSS_GROUP)
+
+ def parse_next(self, ptype, m):
+ """
+ Parse the next packet.
+
+ :param ptype: The (string) type of the incoming packet
+ :param `.Message` m: The packet content
+ """
+ if ptype == MSG_KEXGSS_GROUPREQ:
+ return self._parse_kexgss_groupreq(m)
+ elif ptype == MSG_KEXGSS_GROUP:
+ return self._parse_kexgss_group(m)
+ elif ptype == MSG_KEXGSS_INIT:
+ return self._parse_kexgss_gex_init(m)
+ elif ptype == MSG_KEXGSS_HOSTKEY:
+ return self._parse_kexgss_hostkey(m)
+ elif ptype == MSG_KEXGSS_CONTINUE:
+ return self._parse_kexgss_continue(m)
+ elif ptype == MSG_KEXGSS_COMPLETE:
+ return self._parse_kexgss_complete(m)
+ elif ptype == MSG_KEXGSS_ERROR:
+ return self._parse_kexgss_error(m)
+ msg = "KexGex asked to handle packet type {:d}"
+ raise SSHException(msg.format(ptype))
+
+ # ## internals...
+
+ def _generate_x(self):
+ # generate an "x" (1 < x < (p-1)/2).
+ q = (self.p - 1) // 2
+ qnorm = util.deflate_long(q, 0)
+ qhbyte = byte_ord(qnorm[0])
+ byte_count = len(qnorm)
+ qmask = 0xFF
+ while not (qhbyte & 0x80):
+ qhbyte <<= 1
+ qmask >>= 1
+ while True:
+ x_bytes = os.urandom(byte_count)
+ x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
+ x = util.inflate_long(x_bytes, 1)
+ if (x > 1) and (x < q):
+ break
+ self.x = x
+
+ def _parse_kexgss_groupreq(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_GROUPREQ message (server mode).
+
+ :param `.Message` m: The content of the
+ SSH2_MSG_KEXGSS_GROUPREQ message
+ """
+ minbits = m.get_int()
+ preferredbits = m.get_int()
+ maxbits = m.get_int()
+ # smoosh the user's preferred size into our own limits
+ if preferredbits > self.max_bits:
+ preferredbits = self.max_bits
+ if preferredbits < self.min_bits:
+ preferredbits = self.min_bits
+ # fix min/max if they're inconsistent. technically, we could just pout
+ # and hang up, but there's no harm in giving them the benefit of the
+ # doubt and just picking a bitsize for them.
+ if minbits > preferredbits:
+ minbits = preferredbits
+ if maxbits < preferredbits:
+ maxbits = preferredbits
+ # now save a copy
+ self.min_bits = minbits
+ self.preferred_bits = preferredbits
+ self.max_bits = maxbits
+ # generate prime
+ pack = self.transport._get_modulus_pack()
+ if pack is None:
+ raise SSHException("Can't do server-side gex with no modulus pack")
+ self.transport._log(
+ DEBUG, # noqa
+ "Picking p ({} <= {} <= {} bits)".format(
+ minbits, preferredbits, maxbits
+ ),
+ )
+ self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_GROUP)
+ m.add_mpint(self.p)
+ m.add_mpint(self.g)
+ self.transport._send_message(m)
+ self.transport._expect_packet(MSG_KEXGSS_INIT)
+
+ def _parse_kexgss_group(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_GROUP message (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_GROUP message
+ """
+ self.p = m.get_mpint()
+ self.g = m.get_mpint()
+ # reject if p's bit length < 1024 or > 8192
+ bitlen = util.bit_length(self.p)
+ if (bitlen < 1024) or (bitlen > 8192):
+ raise SSHException(
+ "Server-generated gex p (don't ask) is out of range "
+ "({} bits)".format(bitlen)
+ )
+ self.transport._log(
+ DEBUG, "Got server p ({} bits)".format(bitlen)
+ ) # noqa
+ self._generate_x()
+ # now compute e = g^x mod p
+ self.e = pow(self.g, self.x, self.p)
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_INIT)
+ m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
+ m.add_mpint(self.e)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_HOSTKEY,
+ MSG_KEXGSS_CONTINUE,
+ MSG_KEXGSS_COMPLETE,
+ MSG_KEXGSS_ERROR,
+ )
+
+ def _parse_kexgss_gex_init(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_INIT message (server mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_INIT message
+ """
+ client_token = m.get_string()
+ self.e = m.get_mpint()
+ if (self.e < 1) or (self.e > self.p - 1):
+ raise SSHException('Client kex "e" is out of range')
+ self._generate_x()
+ self.f = pow(self.g, self.x, self.p)
+ K = pow(self.e, self.x, self.p)
+ self.transport.host_key = NullHostKey()
+ key = self.transport.host_key.__str__()
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
+ hm = Message()
+ hm.add(
+ self.transport.remote_version,
+ self.transport.local_version,
+ self.transport.remote_kex_init,
+ self.transport.local_kex_init,
+ key,
+ )
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = sha1(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ srv_token = self.kexgss.ssh_accept_sec_context(
+ self.gss_host, client_token
+ )
+ m = Message()
+ if self.kexgss._gss_srv_ctxt_status:
+ mic_token = self.kexgss.ssh_get_mic(
+ self.transport.session_id, gss_kex=True
+ )
+ m.add_byte(c_MSG_KEXGSS_COMPLETE)
+ m.add_mpint(self.f)
+ m.add_string(mic_token)
+ if srv_token is not None:
+ m.add_boolean(True)
+ m.add_string(srv_token)
+ else:
+ m.add_boolean(False)
+ self.transport._send_message(m)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+ else:
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(srv_token)
+ self.transport._send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+
+ def _parse_kexgss_hostkey(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_HOSTKEY message (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_HOSTKEY message
+ """
+ # client mode
+ host_key = m.get_string()
+ self.transport.host_key = host_key
+ sig = m.get_string()
+ self.transport._verify_key(host_key, sig)
+ self.transport._expect_packet(MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE)
+
+ def _parse_kexgss_continue(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_CONTINUE message.
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE message
+ """
+ if not self.transport.server_mode:
+ srv_token = m.get_string()
+ m = Message()
+ m.add_byte(c_MSG_KEXGSS_CONTINUE)
+ m.add_string(
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ )
+ self.transport.send_message(m)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_ERROR
+ )
+ else:
+ pass
+
+ def _parse_kexgss_complete(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_COMPLETE message
+ """
+ if self.transport.host_key is None:
+ self.transport.host_key = NullHostKey()
+ self.f = m.get_mpint()
+ mic_token = m.get_string()
+ # This must be TRUE, if there is a GSS-API token in this message.
+ bool = m.get_boolean()
+ srv_token = None
+ if bool:
+ srv_token = m.get_string()
+ if (self.f < 1) or (self.f > self.p - 1):
+ raise SSHException('Server kex "f" is out of range')
+ K = pow(self.f, self.x, self.p)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
+ hm = Message()
+ hm.add(
+ self.transport.local_version,
+ self.transport.remote_version,
+ self.transport.local_kex_init,
+ self.transport.remote_kex_init,
+ self.transport.host_key.__str__(),
+ )
+ if not self.old_style:
+ hm.add_int(self.min_bits)
+ hm.add_int(self.preferred_bits)
+ if not self.old_style:
+ hm.add_int(self.max_bits)
+ hm.add_mpint(self.p)
+ hm.add_mpint(self.g)
+ hm.add_mpint(self.e)
+ hm.add_mpint(self.f)
+ hm.add_mpint(K)
+ H = sha1(hm.asbytes()).digest()
+ self.transport._set_K_H(K, H)
+ if srv_token is not None:
+ self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token
+ )
+ self.kexgss.ssh_check_mic(mic_token, H)
+ else:
+ self.kexgss.ssh_check_mic(mic_token, H)
+ self.transport.gss_kex_used = True
+ self.transport._activate_outbound()
+
+ def _parse_kexgss_error(self, m):
+ """
+ Parse the SSH2_MSG_KEXGSS_ERROR message (client mode).
+ The server may send a GSS-API error message. if it does, we display
+ the error by throwing an exception (client mode).
+
+ :param `Message` m: The content of the SSH2_MSG_KEXGSS_ERROR message
+ :raise SSHException: Contains GSS-API major and minor status as well as
+ the error message and the language tag of the
+ message
+ """
+ maj_status = m.get_int()
+ min_status = m.get_int()
+ err_msg = m.get_string()
+ m.get_string() # we don't care about the language (lang_tag)!
+ raise SSHException(
+ """GSS-API Error:
+Major Status: {}
+Minor Status: {}
+Error Message: {}
+""".format(
+ maj_status, min_status, err_msg
+ )
+ )
+
+
+class NullHostKey:
+    """
+    This class represents the Null Host Key for GSS-API Key Exchange as defined
+    in `RFC 4462 Section 5
+    `_
+    """
+
+    def __init__(self):
+        # the "null" key is simply an empty string
+        self.key = ""
+
+    def __str__(self):
+        return self.key
+
+    def get_name(self):
+        # the same empty string doubles as the key's algorithm name
+        return self.key
diff --git a/lib/paramiko/message.py b/lib/paramiko/message.py
new file mode 100644
index 0000000..8c2b3bd
--- /dev/null
+++ b/lib/paramiko/message.py
@@ -0,0 +1,318 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Implementation of an SSH2 "message".
+"""
+
+import struct
+from io import BytesIO
+
+from paramiko import util
+from paramiko.common import zero_byte, max_byte, one_byte
+from paramiko.util import u
+
+
+class Message:
+ """
+ An SSH2 message is a stream of bytes that encodes some combination of
+ strings, integers, bools, and infinite-precision integers. This class
+ builds or breaks down such a byte stream.
+
+ Normally you don't need to deal with anything this low-level, but it's
+ exposed for people implementing custom extensions, or features that
+ paramiko doesn't support yet.
+ """
+
+ big_int = 0xFF000000
+
+ def __init__(self, content=None):
+ """
+ Create a new SSH2 message.
+
+ :param bytes content:
+ the byte stream to use as the message content (passed in only when
+ decomposing a message).
+ """
+ if content is not None:
+ self.packet = BytesIO(content)
+ else:
+ self.packet = BytesIO()
+
+ def __bytes__(self):
+ return self.asbytes()
+
+ def __repr__(self):
+ """
+ Returns a string representation of this object, for debugging.
+ """
+ return "paramiko.Message(" + repr(self.packet.getvalue()) + ")"
+
+ # TODO 4.0: just merge into __bytes__ (everywhere)
+ def asbytes(self):
+ """
+ Return the byte stream content of this Message, as a `bytes`.
+ """
+ return self.packet.getvalue()
+
+ def rewind(self):
+ """
+ Rewind the message to the beginning as if no items had been parsed
+ out of it yet.
+ """
+ self.packet.seek(0)
+
+ def get_remainder(self):
+ """
+ Return the `bytes` of this message that haven't already been parsed and
+ returned.
+ """
+ position = self.packet.tell()
+ remainder = self.packet.read()
+ self.packet.seek(position)
+ return remainder
+
+ def get_so_far(self):
+ """
+ Returns the `bytes` of this message that have been parsed and
+ returned. The string passed into a message's constructor can be
+ regenerated by concatenating ``get_so_far`` and `get_remainder`.
+ """
+ position = self.packet.tell()
+ self.rewind()
+ return self.packet.read(position)
+
+ def get_bytes(self, n):
+ """
+ Return the next ``n`` bytes of the message, without decomposing into an
+ int, decoded string, etc. Just the raw bytes are returned. Returns a
+ string of ``n`` zero bytes if there weren't ``n`` bytes remaining in
+ the message.
+ """
+ b = self.packet.read(n)
+ max_pad_size = 1 << 20 # Limit padding to 1 MB
+ if len(b) < n < max_pad_size:
+ return b + zero_byte * (n - len(b))
+ return b
+
+ def get_byte(self):
+ """
+ Return the next byte of the message, without decomposing it. This
+ is equivalent to `get_bytes(1) <get_bytes>`.
+
+ :return:
+ the next (`bytes`) byte of the message, or ``b'\000'`` if there
+ aren't any bytes remaining.
+ """
+ return self.get_bytes(1)
+
+ def get_boolean(self):
+ """
+ Fetch a boolean from the stream.
+ """
+ b = self.get_bytes(1)
+ return b != zero_byte
+
+ def get_adaptive_int(self):
+ """
+ Fetch an int from the stream.
+
+ :return: a 32-bit unsigned `int`.
+ """
+ byte = self.get_bytes(1)
+ if byte == max_byte:
+ return util.inflate_long(self.get_binary())
+ byte += self.get_bytes(3)
+ return struct.unpack(">I", byte)[0]
+
+ def get_int(self):
+ """
+ Fetch an int from the stream.
+ """
+ return struct.unpack(">I", self.get_bytes(4))[0]
+
+ def get_int64(self):
+ """
+ Fetch a 64-bit int from the stream.
+
+ :return: a 64-bit unsigned integer (`int`).
+ """
+ return struct.unpack(">Q", self.get_bytes(8))[0]
+
+ def get_mpint(self):
+ """
+ Fetch a long int (mpint) from the stream.
+
+ :return: an arbitrary-length integer (`int`).
+ """
+ return util.inflate_long(self.get_binary())
+
+ # TODO 4.0: depending on where this is used internally or downstream, force
+ # users to specify get_binary instead and delete this.
+ def get_string(self):
+ """
+ Fetch a "string" from the stream. This will actually be a `bytes`
+ object, and may contain unprintable characters. (It's not unheard of
+ for a string to contain another byte-stream message.)
+ """
+ return self.get_bytes(self.get_int())
+
+ # TODO 4.0: also consider having this take over the get_string name, and
+ # remove this name instead.
+ def get_text(self):
+ """
+ Fetch a Unicode string from the stream.
+
+ This currently operates by attempting to encode the next "string" as
+ ``utf-8``.
+ """
+ return u(self.get_string())
+
+ def get_binary(self):
+ """
+ Alias for `get_string` (obtains a bytestring).
+ """
+ return self.get_bytes(self.get_int())
+
+ def get_list(self):
+ """
+ Fetch a list of `strings <get_string>` from the stream.
+
+ These are trivially encoded as comma-separated values in a string.
+ """
+ return self.get_text().split(",")
+
+ def add_bytes(self, b):
+ """
+ Write bytes to the stream, without any formatting.
+
+ :param bytes b: bytes to add
+ """
+ self.packet.write(b)
+ return self
+
+ def add_byte(self, b):
+ """
+ Write a single byte to the stream, without any formatting.
+
+ :param bytes b: byte to add
+ """
+ self.packet.write(b)
+ return self
+
+ def add_boolean(self, b):
+ """
+ Add a boolean value to the stream.
+
+ :param bool b: boolean value to add
+ """
+ if b:
+ self.packet.write(one_byte)
+ else:
+ self.packet.write(zero_byte)
+ return self
+
+ def add_int(self, n):
+ """
+ Add an integer to the stream.
+
+ :param int n: integer to add
+ """
+ self.packet.write(struct.pack(">I", n))
+ return self
+
+ def add_adaptive_int(self, n):
+ """
+ Add an integer to the stream.
+
+ :param int n: integer to add
+ """
+ if n >= Message.big_int:
+ self.packet.write(max_byte)
+ self.add_string(util.deflate_long(n))
+ else:
+ self.packet.write(struct.pack(">I", n))
+ return self
+
+ def add_int64(self, n):
+ """
+ Add a 64-bit int to the stream.
+
+ :param int n: long int to add
+ """
+ self.packet.write(struct.pack(">Q", n))
+ return self
+
+ def add_mpint(self, z):
+ """
+ Add a long int to the stream, encoded as an infinite-precision
+ integer. This method only works on positive numbers.
+
+ :param int z: long int to add
+ """
+ self.add_string(util.deflate_long(z))
+ return self
+
+ # TODO: see the TODO for get_string/get_text/et al, this should change
+ # to match.
+ def add_string(self, s):
+ """
+ Add a bytestring to the stream.
+
+ :param bytes s: bytestring to add
+ """
+ s = util.asbytes(s)
+ self.add_int(len(s))
+ self.packet.write(s)
+ return self
+
+ def add_list(self, l): # noqa: E741
+ """
+ Add a list of strings to the stream. They are encoded identically to
+ a single string of values separated by commas. (Yes, really, that's
+ how SSH2 does it.)
+
+ :param l: list of strings to add
+ """
+ self.add_string(",".join(l))
+ return self
+
+ def _add(self, i):
+ if type(i) is bool:
+ return self.add_boolean(i)
+ elif isinstance(i, int):
+ return self.add_adaptive_int(i)
+ elif type(i) is list:
+ return self.add_list(i)
+ else:
+ return self.add_string(i)
+
+ # TODO: this would never have worked for unicode strings under Python 3,
+ # guessing nobody/nothing ever used it for that purpose?
+ def add(self, *seq):
+ """
+ Add a sequence of items to the stream. The values are encoded based
+ on their type: bytes, str, int, bool, or list.
+
+ .. warning::
+ Longs are encoded non-deterministically. Don't use this method.
+
+ :param seq: the sequence of items
+ """
+ for item in seq:
+ self._add(item)
diff --git a/lib/paramiko/packet.py b/lib/paramiko/packet.py
new file mode 100644
index 0000000..f1de4b0
--- /dev/null
+++ b/lib/paramiko/packet.py
@@ -0,0 +1,696 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Packet handling
+"""
+
+import errno
+import os
+import socket
+import struct
+import threading
+import time
+from hmac import HMAC
+
+from paramiko import util
+from paramiko.common import (
+ linefeed_byte,
+ cr_byte_value,
+ MSG_NAMES,
+ DEBUG,
+ xffffffff,
+ zero_byte,
+ byte_ord,
+)
+from paramiko.util import u
+from paramiko.ssh_exception import SSHException, ProxyCommandFailure
+from paramiko.message import Message
+
+
+def compute_hmac(key, message, digest_class):
+ return HMAC(key, message, digest_class).digest()
+
+
+class NeedRekeyException(Exception):
+ """
+ Exception indicating a rekey is needed.
+ """
+
+ pass
+
+
+def first_arg(e):
+ arg = None
+ if type(e.args) is tuple and len(e.args) > 0:
+ arg = e.args[0]
+ return arg
+
+
+class Packetizer:
+ """
+ Implementation of the base SSH packet protocol.
+ """
+
+ # READ the secsh RFC's before raising these values. if anything,
+ # they should probably be lower.
+ REKEY_PACKETS = pow(2, 29)
+ REKEY_BYTES = pow(2, 29)
+
+ # Allow receiving this many packets after a re-key request before
+ # terminating
+ REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29)
+ # Allow receiving this many bytes after a re-key request before terminating
+ REKEY_BYTES_OVERFLOW_MAX = pow(2, 29)
+
+ def __init__(self, socket):
+ self.__socket = socket
+ self.__logger = None
+ self.__closed = False
+ self.__dump_packets = False
+ self.__need_rekey = False
+ self.__init_count = 0
+ self.__remainder = bytes()
+ self._initial_kex_done = False
+
+ # used for noticing when to re-key:
+ self.__sent_bytes = 0
+ self.__sent_packets = 0
+ self.__received_bytes = 0
+ self.__received_packets = 0
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+
+ # current inbound/outbound ciphering:
+ self.__block_size_out = 8
+ self.__block_size_in = 8
+ self.__mac_size_out = 0
+ self.__mac_size_in = 0
+ self.__block_engine_out = None
+ self.__block_engine_in = None
+ self.__sdctr_out = False
+ self.__mac_engine_out = None
+ self.__mac_engine_in = None
+ self.__mac_key_out = bytes()
+ self.__mac_key_in = bytes()
+ self.__compress_engine_out = None
+ self.__compress_engine_in = None
+ self.__sequence_number_out = 0
+ self.__sequence_number_in = 0
+ self.__etm_out = False
+ self.__etm_in = False
+
+ # AEAD (eg aes128-gcm/aes256-gcm) cipher use
+ self.__aead_out = False
+ self.__aead_in = False
+ self.__iv_out = None
+ self.__iv_in = None
+
+ # lock around outbound writes (packet computation)
+ self.__write_lock = threading.RLock()
+
+ # keepalives:
+ self.__keepalive_interval = 0
+ self.__keepalive_last = time.time()
+ self.__keepalive_callback = None
+
+ self.__timer = None
+ self.__handshake_complete = False
+ self.__timer_expired = False
+
+ @property
+ def closed(self):
+ return self.__closed
+
+ def reset_seqno_out(self):
+ self.__sequence_number_out = 0
+
+ def reset_seqno_in(self):
+ self.__sequence_number_in = 0
+
+ def set_log(self, log):
+ """
+ Set the Python log object to use for logging.
+ """
+ self.__logger = log
+
+ def set_outbound_cipher(
+ self,
+ block_engine,
+ block_size,
+ mac_engine,
+ mac_size,
+ mac_key,
+ sdctr=False,
+ etm=False,
+ aead=False,
+ iv_out=None,
+ ):
+ """
+ Switch outbound data cipher.
+ :param etm: Set encrypt-then-mac from OpenSSH
+ """
+ self.__block_engine_out = block_engine
+ self.__sdctr_out = sdctr
+ self.__block_size_out = block_size
+ self.__mac_engine_out = mac_engine
+ self.__mac_size_out = mac_size
+ self.__mac_key_out = mac_key
+ self.__sent_bytes = 0
+ self.__sent_packets = 0
+ self.__etm_out = etm
+ self.__aead_out = aead
+ self.__iv_out = iv_out
+ # wait until the reset happens in both directions before clearing
+ # rekey flag
+ self.__init_count |= 1
+ if self.__init_count == 3:
+ self.__init_count = 0
+ self.__need_rekey = False
+
+ def set_inbound_cipher(
+ self,
+ block_engine,
+ block_size,
+ mac_engine,
+ mac_size,
+ mac_key,
+ etm=False,
+ aead=False,
+ iv_in=None,
+ ):
+ """
+ Switch inbound data cipher.
+ :param etm: Set encrypt-then-mac from OpenSSH
+ """
+ self.__block_engine_in = block_engine
+ self.__block_size_in = block_size
+ self.__mac_engine_in = mac_engine
+ self.__mac_size_in = mac_size
+ self.__mac_key_in = mac_key
+ self.__received_bytes = 0
+ self.__received_packets = 0
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+ self.__etm_in = etm
+ self.__aead_in = aead
+ self.__iv_in = iv_in
+ # wait until the reset happens in both directions before clearing
+ # rekey flag
+ self.__init_count |= 2
+ if self.__init_count == 3:
+ self.__init_count = 0
+ self.__need_rekey = False
+
+ def set_outbound_compressor(self, compressor):
+ self.__compress_engine_out = compressor
+
+ def set_inbound_compressor(self, compressor):
+ self.__compress_engine_in = compressor
+
+ def close(self):
+ self.__closed = True
+ self.__socket.close()
+
+ def set_hexdump(self, hexdump):
+ self.__dump_packets = hexdump
+
+ def get_hexdump(self):
+ return self.__dump_packets
+
+ def get_mac_size_in(self):
+ return self.__mac_size_in
+
+ def get_mac_size_out(self):
+ return self.__mac_size_out
+
+ def need_rekey(self):
+ """
+ Returns ``True`` if a new set of keys needs to be negotiated. This
+ will be triggered during a packet read or write, so it should be
+ checked after every read or write, or at least after every few.
+ """
+ return self.__need_rekey
+
+ def set_keepalive(self, interval, callback):
+ """
+ Turn on/off the callback keepalive. If ``interval`` seconds pass with
+ no data read from or written to the socket, the callback will be
+ executed and the timer will be reset.
+ """
+ self.__keepalive_interval = interval
+ self.__keepalive_callback = callback
+ self.__keepalive_last = time.time()
+
+ def read_timer(self):
+ self.__timer_expired = True
+
+ def start_handshake(self, timeout):
+ """
+ Tells `Packetizer` that the handshake process started.
+ Starts a book keeping timer that can signal a timeout in the
+ handshake process.
+
+ :param float timeout: amount of seconds to wait before timing out
+ """
+ if not self.__timer:
+ self.__timer = threading.Timer(float(timeout), self.read_timer)
+ self.__timer.start()
+
+ def handshake_timed_out(self):
+ """
+ Checks if the handshake has timed out.
+
+ If `start_handshake` wasn't called before the call to this function,
+ the return value will always be `False`. If the handshake completed
+ before a timeout was reached, the return value will be `False`
+
+ :return: handshake time out status, as a `bool`
+ """
+ if not self.__timer:
+ return False
+ if self.__handshake_complete:
+ return False
+ return self.__timer_expired
+
+ def complete_handshake(self):
+ """
+ Tells `Packetizer` that the handshake has completed.
+ """
+ if self.__timer:
+ self.__timer.cancel()
+ self.__timer_expired = False
+ self.__handshake_complete = True
+
+ def read_all(self, n, check_rekey=False):
+ """
+ Read as close to N bytes as possible, blocking as long as necessary.
+
+ :param int n: number of bytes to read
+ :return: the data read, as a `str`
+
+ :raises:
+ ``EOFError`` -- if the socket was closed before all the bytes could
+ be read
+ """
+ out = bytes()
+ # handle over-reading from reading the banner line
+ if len(self.__remainder) > 0:
+ out = self.__remainder[:n]
+ self.__remainder = self.__remainder[n:]
+ n -= len(out)
+ while n > 0:
+ got_timeout = False
+ if self.handshake_timed_out():
+ raise EOFError()
+ try:
+ x = self.__socket.recv(n)
+ if len(x) == 0:
+ raise EOFError()
+ out += x
+ n -= len(x)
+ except socket.timeout:
+ got_timeout = True
+ except socket.error as e:
+ # on Linux, sometimes instead of socket.timeout, we get
+ # EAGAIN. this is a bug in recent (> 2.6.9) kernels but
+ # we need to work around it.
+ arg = first_arg(e)
+ if arg == errno.EAGAIN:
+ got_timeout = True
+ elif self.__closed:
+ raise EOFError()
+ else:
+ raise
+ if got_timeout:
+ if self.__closed:
+ raise EOFError()
+ if check_rekey and (len(out) == 0) and self.__need_rekey:
+ raise NeedRekeyException()
+ self._check_keepalive()
+ return out
+
+ def write_all(self, out):
+ self.__keepalive_last = time.time()
+ iteration_with_zero_as_return_value = 0
+ while len(out) > 0:
+ retry_write = False
+ try:
+ n = self.__socket.send(out)
+ except socket.timeout:
+ retry_write = True
+ except socket.error as e:
+ arg = first_arg(e)
+ if arg == errno.EAGAIN:
+ retry_write = True
+ else:
+ n = -1
+ except ProxyCommandFailure:
+ raise # so it doesn't get swallowed by the below catchall
+ except Exception:
+ # could be: (32, 'Broken pipe')
+ n = -1
+ if retry_write:
+ n = 0
+ if self.__closed:
+ n = -1
+ else:
+ if n == 0 and iteration_with_zero_as_return_value > 10:
+ # We shouldn't retry the write, but we didn't
+ # manage to send anything over the socket. This might be an
+ # indication that we have lost contact with the remote
+ # side, but are yet to receive an EOFError or other socket
+ # errors. Let's give it some iteration to try and catch up.
+ n = -1
+ iteration_with_zero_as_return_value += 1
+ if n < 0:
+ raise EOFError()
+ if n == len(out):
+ break
+ out = out[n:]
+ return
+
+ def readline(self, timeout):
+ """
+ Read a line from the socket. We assume no data is pending after the
+ line, so it's okay to attempt large reads.
+ """
+ buf = self.__remainder
+ while linefeed_byte not in buf:
+ buf += self._read_timeout(timeout)
+ n = buf.index(linefeed_byte)
+ self.__remainder = buf[n + 1 :]
+ buf = buf[:n]
+ if (len(buf) > 0) and (buf[-1] == cr_byte_value):
+ buf = buf[:-1]
+ return u(buf)
+
+ def _inc_iv_counter(self, iv):
+ # Per https://www.rfc-editor.org/rfc/rfc5647.html#section-7.1 ,
+ # we increment the last 8 bytes of the 12-byte IV...
+ iv_counter_b = iv[4:]
+ iv_counter = int.from_bytes(iv_counter_b, "big")
+ inc_iv_counter = iv_counter + 1
+ inc_iv_counter_b = inc_iv_counter.to_bytes(8, "big")
+ # ...then re-concatenate it with the static first 4 bytes
+ new_iv = iv[0:4] + inc_iv_counter_b
+ return new_iv
+
+ def send_message(self, data):
+ """
+ Write a block of data using the current cipher, as an SSH block.
+ """
+ # encrypt this sucka
+ data = data.asbytes()
+ cmd = byte_ord(data[0])
+ if cmd in MSG_NAMES:
+ cmd_name = MSG_NAMES[cmd]
+ else:
+ cmd_name = "${:x}".format(cmd)
+ orig_len = len(data)
+ self.__write_lock.acquire()
+ try:
+ if self.__compress_engine_out is not None:
+ data = self.__compress_engine_out(data)
+ packet = self._build_packet(data)
+ if self.__dump_packets:
+ self._log(
+ DEBUG,
+ "Write packet <{}>, length {}".format(cmd_name, orig_len),
+ )
+ self._log(DEBUG, util.format_binary(packet, "OUT: "))
+ if self.__block_engine_out is not None:
+ if self.__etm_out:
+ # packet length is not encrypted in EtM
+ out = packet[0:4] + self.__block_engine_out.update(
+ packet[4:]
+ )
+ elif self.__aead_out:
+ # Packet-length field is used as the 'associated data'
+ # under AES-GCM, so like EtM, it's not encrypted. See
+ # https://www.rfc-editor.org/rfc/rfc5647#section-7.3
+ out = packet[0:4] + self.__block_engine_out.encrypt(
+ self.__iv_out, packet[4:], packet[0:4]
+ )
+ self.__iv_out = self._inc_iv_counter(self.__iv_out)
+ else:
+ out = self.__block_engine_out.update(packet)
+ else:
+ out = packet
+ # Append an MAC when needed (eg, not under AES-GCM)
+ if self.__block_engine_out is not None and not self.__aead_out:
+ packed = struct.pack(">I", self.__sequence_number_out)
+ payload = packed + (out if self.__etm_out else packet)
+ out += compute_hmac(
+ self.__mac_key_out, payload, self.__mac_engine_out
+ )[: self.__mac_size_out]
+ next_seq = (self.__sequence_number_out + 1) & xffffffff
+ if next_seq == 0 and not self._initial_kex_done:
+ raise SSHException(
+ "Sequence number rolled over during initial kex!"
+ )
+ self.__sequence_number_out = next_seq
+ self.write_all(out)
+
+ self.__sent_bytes += len(out)
+ self.__sent_packets += 1
+ sent_too_much = (
+ self.__sent_packets >= self.REKEY_PACKETS
+ or self.__sent_bytes >= self.REKEY_BYTES
+ )
+ if sent_too_much and not self.__need_rekey:
+ # only ask once for rekeying
+ msg = "Rekeying (hit {} packets, {} bytes sent)"
+ self._log(
+ DEBUG, msg.format(self.__sent_packets, self.__sent_bytes)
+ )
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+ self._trigger_rekey()
+ finally:
+ self.__write_lock.release()
+
+ def read_message(self):
+ """
+ Only one thread should ever be in this function (no other locking is
+ done).
+
+ :raises: `.SSHException` -- if the packet is mangled
+ :raises: `.NeedRekeyException` -- if the transport should rekey
+ """
+ header = self.read_all(self.__block_size_in, check_rekey=True)
+ if self.__etm_in:
+ packet_size = struct.unpack(">I", header[:4])[0]
+ remaining = packet_size - self.__block_size_in + 4
+ packet = header[4:] + self.read_all(remaining, check_rekey=False)
+ mac = self.read_all(self.__mac_size_in, check_rekey=False)
+ mac_payload = (
+ struct.pack(">II", self.__sequence_number_in, packet_size)
+ + packet
+ )
+ my_mac = compute_hmac(
+ self.__mac_key_in, mac_payload, self.__mac_engine_in
+ )[: self.__mac_size_in]
+ if not util.constant_time_bytes_eq(my_mac, mac):
+ raise SSHException("Mismatched MAC")
+ header = packet
+
+ if self.__aead_in:
+ # Grab unencrypted (considered 'additional data' under GCM) packet
+ # length.
+ packet_size = struct.unpack(">I", header[:4])[0]
+ aad = header[:4]
+ remaining = (
+ packet_size - self.__block_size_in + 4 + self.__mac_size_in
+ )
+ packet = header[4:] + self.read_all(remaining, check_rekey=False)
+ header = self.__block_engine_in.decrypt(self.__iv_in, packet, aad)
+
+ self.__iv_in = self._inc_iv_counter(self.__iv_in)
+
+ if self.__block_engine_in is not None and not self.__aead_in:
+ header = self.__block_engine_in.update(header)
+ if self.__dump_packets:
+ self._log(DEBUG, util.format_binary(header, "IN: "))
+
+ # When ETM or AEAD (GCM) are in use, we've already read the packet size
+ # & decrypted everything, so just set the packet back to the header we
+ # obtained.
+ if self.__etm_in or self.__aead_in:
+ packet = header
+ # Otherwise, use the older non-ETM logic
+ else:
+ packet_size = struct.unpack(">I", header[:4])[0]
+
+ # leftover contains decrypted bytes from the first block (after the
+ # length field)
+ leftover = header[4:]
+ if (packet_size - len(leftover)) % self.__block_size_in != 0:
+ raise SSHException("Invalid packet blocking")
+ buf = self.read_all(
+ packet_size + self.__mac_size_in - len(leftover)
+ )
+ packet = buf[: packet_size - len(leftover)]
+ post_packet = buf[packet_size - len(leftover) :]
+
+ if self.__block_engine_in is not None:
+ packet = self.__block_engine_in.update(packet)
+ packet = leftover + packet
+
+ if self.__dump_packets:
+ self._log(DEBUG, util.format_binary(packet, "IN: "))
+
+ if self.__mac_size_in > 0 and not self.__etm_in and not self.__aead_in:
+ mac = post_packet[: self.__mac_size_in]
+ mac_payload = (
+ struct.pack(">II", self.__sequence_number_in, packet_size)
+ + packet
+ )
+ my_mac = compute_hmac(
+ self.__mac_key_in, mac_payload, self.__mac_engine_in
+ )[: self.__mac_size_in]
+ if not util.constant_time_bytes_eq(my_mac, mac):
+ raise SSHException("Mismatched MAC")
+ padding = byte_ord(packet[0])
+ payload = packet[1 : packet_size - padding]
+
+ if self.__dump_packets:
+ self._log(
+ DEBUG,
+ "Got payload ({} bytes, {} padding)".format(
+ packet_size, padding
+ ),
+ )
+
+ if self.__compress_engine_in is not None:
+ payload = self.__compress_engine_in(payload)
+
+ msg = Message(payload[1:])
+ msg.seqno = self.__sequence_number_in
+ next_seq = (self.__sequence_number_in + 1) & xffffffff
+ if next_seq == 0 and not self._initial_kex_done:
+ raise SSHException(
+ "Sequence number rolled over during initial kex!"
+ )
+ self.__sequence_number_in = next_seq
+
+ # check for rekey
+ raw_packet_size = packet_size + self.__mac_size_in + 4
+ self.__received_bytes += raw_packet_size
+ self.__received_packets += 1
+ if self.__need_rekey:
+ # we've asked to rekey -- give them some packets to comply before
+ # dropping the connection
+ self.__received_bytes_overflow += raw_packet_size
+ self.__received_packets_overflow += 1
+ if (
+ self.__received_packets_overflow
+ >= self.REKEY_PACKETS_OVERFLOW_MAX
+ ) or (
+ self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX
+ ):
+ raise SSHException(
+ "Remote transport is ignoring rekey requests"
+ )
+ elif (self.__received_packets >= self.REKEY_PACKETS) or (
+ self.__received_bytes >= self.REKEY_BYTES
+ ):
+ # only ask once for rekeying
+ err = "Rekeying (hit {} packets, {} bytes received)"
+ self._log(
+ DEBUG,
+ err.format(self.__received_packets, self.__received_bytes),
+ )
+ self.__received_bytes_overflow = 0
+ self.__received_packets_overflow = 0
+ self._trigger_rekey()
+
+ cmd = byte_ord(payload[0])
+ if cmd in MSG_NAMES:
+ cmd_name = MSG_NAMES[cmd]
+ else:
+ cmd_name = "${:x}".format(cmd)
+ if self.__dump_packets:
+ self._log(
+ DEBUG,
+ "Read packet <{}>, length {}".format(cmd_name, len(payload)),
+ )
+ return cmd, msg
+
+ # ...protected...
+
+ def _log(self, level, msg):
+ if self.__logger is None:
+ return
+ if issubclass(type(msg), list):
+ for m in msg:
+ self.__logger.log(level, m)
+ else:
+ self.__logger.log(level, msg)
+
+ def _check_keepalive(self):
+ if (
+ not self.__keepalive_interval
+ or not self.__block_engine_out
+ or self.__need_rekey
+ ):
+ # wait till we're encrypting, and not in the middle of rekeying
+ return
+ now = time.time()
+ if now > self.__keepalive_last + self.__keepalive_interval:
+ self.__keepalive_callback()
+ self.__keepalive_last = now
+
+ def _read_timeout(self, timeout):
+ start = time.time()
+ while True:
+ try:
+ x = self.__socket.recv(128)
+ if len(x) == 0:
+ raise EOFError()
+ break
+ except socket.timeout:
+ pass
+ if self.__closed:
+ raise EOFError()
+ now = time.time()
+ if now - start >= timeout:
+ raise socket.timeout()
+ return x
+
+ def _build_packet(self, payload):
+ # pad up at least 4 bytes, to nearest block-size (usually 8)
+ bsize = self.__block_size_out
+ # do not include payload length in computations for padding in EtM mode
+ # (payload length won't be encrypted)
+ addlen = 4 if self.__etm_out or self.__aead_out else 8
+ padding = 3 + bsize - ((len(payload) + addlen) % bsize)
+ packet = struct.pack(">IB", len(payload) + padding + 1, padding)
+ packet += payload
+ if self.__sdctr_out or self.__block_engine_out is None:
+ # cute trick i caught openssh doing: if we're not encrypting or
+ # SDCTR mode (RFC4344),
+ # don't waste random bytes for the padding
+ packet += zero_byte * padding
+ else:
+ packet += os.urandom(padding)
+ return packet
+
+ def _trigger_rekey(self):
+ # outside code should check for this flag
+ self.__need_rekey = True
diff --git a/lib/paramiko/pipe.py b/lib/paramiko/pipe.py
new file mode 100644
index 0000000..65944fa
--- /dev/null
+++ b/lib/paramiko/pipe.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2003-2007 Robey Pointer
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""
+Abstraction of a one-way pipe where the read end can be used in
+`select.select`. Normally this is trivial, but Windows makes it nearly
+impossible.
+
+The pipe acts like an Event, which can be set or cleared. When set, the pipe
+will trigger as readable in `select