diff --git a/ci/default.nix b/ci/default.nix index 73c05a3529f5f..01fdb2cf00d01 100644 --- a/ci/default.nix +++ b/ci/default.nix @@ -143,6 +143,8 @@ let }; programs.zizmor.enable = true; + + programs.ruff-format.enable = true; }; fs = pkgs.lib.fileset; nixFilesSrc = fs.toSource { diff --git a/doc/tests/manpage-urls.py b/doc/tests/manpage-urls.py index a1ea6d27969e7..13483caeaafa3 100755 --- a/doc/tests/manpage-urls.py +++ b/doc/tests/manpage-urls.py @@ -14,17 +14,24 @@ from structlog.contextvars import bound_contextvars as log_context -LogLevel = IntEnum('LogLevel', { - lvl: getattr(logging, lvl) - for lvl in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL') -}) +LogLevel = IntEnum( + "LogLevel", + { + lvl: getattr(logging, lvl) + for lvl in ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") + }, +) LogLevel.__str__ = lambda self: self.name -EXPECTED_STATUS=frozenset(( - HTTPStatus.OK, HTTPStatus.FOUND, - HTTPStatus.NOT_FOUND, -)) +EXPECTED_STATUS = frozenset( + ( + HTTPStatus.OK, + HTTPStatus.FOUND, + HTTPStatus.NOT_FOUND, + ) +) + async def check(session: aiohttp.ClientSession, manpage: str, url: str) -> HTTPStatus: with log_context(manpage=manpage, url=url): @@ -43,6 +50,7 @@ async def check(session: aiohttp.ClientSession, manpage: str, url: str) -> HTTPS return st + async def main(urls_path: Path) -> Mapping[HTTPStatus, int]: logger.info(f"Parsing {urls_path}") with urls_path.open() as urls_file: @@ -52,36 +60,38 @@ async def main(urls_path: Path) -> Mapping[HTTPStatus, int]: logger.info(f"Checking URLs from {urls_path}") async with aiohttp.ClientSession() as session: - for status in asyncio.as_completed([ - check(session, manpage, url) - for manpage, url in urls.items() - ]): - count[await status]+=1 + for status in asyncio.as_completed( + [check(session, manpage, url) for manpage, url in urls.items()] + ): + count[await status] += 1 ok = count[HTTPStatus.OK] + count[HTTPStatus.FOUND] broken = count[HTTPStatus.NOT_FOUND] unknown = sum(c for st, c in count.items() if st not in EXPECTED_STATUS) - logger.info(f"Done: {broken} broken links, " - f"{ok} correct links, and {unknown} unexpected status") + logger.info( + f"Done: {broken} broken links, " + f"{ok} correct links, and {unknown} unexpected status" + ) return count def parse_args(args: Optional[Sequence[str]] = None) -> Namespace: parser = ArgumentParser( - prog = 'check-manpage-urls', - description = 'Check the validity of the manpage URLs linked in the nixpkgs manual', + prog="check-manpage-urls", + description="Check the validity of the manpage URLs linked in the nixpkgs manual", ) parser.add_argument( - '-l', '--log-level', - default = os.getenv('LOG_LEVEL', 'INFO'), - type = lambda s: LogLevel[s], - choices = list(LogLevel), + "-l", + "--log-level", + default=os.getenv("LOG_LEVEL", "INFO"), + type=lambda s: LogLevel[s], + choices=list(LogLevel), ) parser.add_argument( - 'file', - type = Path, - nargs = '?', + "file", + type=Path, + nargs="?", ) return parser.parse_args(args) @@ -102,7 +112,7 @@ def parse_args(args: Optional[Sequence[str]] = None) -> Namespace: REPO_ROOT = Path(__file__).parent.parent.parent.parent logger.info(f"Assuming we are in a nixpkgs repo rooted at {REPO_ROOT}") - urls_path = REPO_ROOT / 'doc' / 'manpage-urls.json' + urls_path = REPO_ROOT / "doc" / "manpage-urls.json" count = asyncio.run(main(urls_path)) diff --git a/maintainers/scripts/doc/escape-code-markup.py b/maintainers/scripts/doc/escape-code-markup.py index 015435b698e65..bd00f9d230a24 100755 --- a/maintainers/scripts/doc/escape-code-markup.py +++ 
b/maintainers/scripts/doc/escape-code-markup.py @@ -10,6 +10,7 @@ import re import sys + def replace_element_by_text(el: ET.Element, text: str) -> None: """ Author: bernulf @@ -26,6 +27,7 @@ def replace_element_by_text(el: ET.Element, text: str) -> None: parent.text = (parent.text or "") + text parent.remove(el) + DOCBOOK_NS = "http://docbook.org/ns/docbook" # List of elements that pandoc’s DocBook reader strips markup from. @@ -58,7 +60,8 @@ def replace_element_by_text(el: ET.Element, text: str) -> None: ] XMLNS_REGEX = re.compile(r'\s+xmlns(?::[^=]+)?="[^"]*"') -ROOT_ELEMENT_REGEX = re.compile(r'^\s*<[^>]+>') +ROOT_ELEMENT_REGEX = re.compile(r"^\s*<[^>]+>") + def remove_xmlns(match: re.Match) -> str: """ @@ -66,15 +69,18 @@ def remove_xmlns(match: re.Match) -> str: Expects a match containing an opening tag. """ - return XMLNS_REGEX.sub('', match.group(0)) + return XMLNS_REGEX.sub("", match.group(0)) + -if __name__ == '__main__': +if __name__ == "__main__": assert len(sys.argv) >= 3, "usage: escape-code-markup.py " tree = ET.parse(sys.argv[1]) name_predicate = " or ".join([f"local-name()='{el}'" for el in code_elements]) - for markup in tree.xpath(f"//*[({name_predicate}) and namespace-uri()='{DOCBOOK_NS}']/*"): + for markup in tree.xpath( + f"//*[({name_predicate}) and namespace-uri()='{DOCBOOK_NS}']/*" + ): text = ET.tostring(markup, encoding=str) # tostring adds xmlns attributes to the element we want to stringify diff --git a/maintainers/scripts/doc/replace-xrefs-by-empty-links.py b/maintainers/scripts/doc/replace-xrefs-by-empty-links.py index 2006ef897f7af..8db3e36ae5b03 100755 --- a/maintainers/scripts/doc/replace-xrefs-by-empty-links.py +++ b/maintainers/scripts/doc/replace-xrefs-by-empty-links.py @@ -17,14 +17,14 @@ } -if __name__ == '__main__': +if __name__ == "__main__": assert len(sys.argv) >= 3, "usage: replace-xrefs-by-empty-links.py " tree = ET.parse(sys.argv[1]) for xref in tree.findall(".//db:xref", ns): text = ET.tostring(xref, encoding=str) parent = xref.getparent() - link = parent.makeelement('link') + link = parent.makeelement("link") target_name = xref.get("linkend") link.set(f"{{{XLINK_NS}}}href", f"#{target_name}") parent.replace(xref, link) diff --git a/maintainers/scripts/hydra-eval-failures.py b/maintainers/scripts/hydra-eval-failures.py index b7518b1285745..efb5a42f51f6a 100755 --- a/maintainers/scripts/hydra-eval-failures.py +++ b/maintainers/scripts/hydra-eval-failures.py @@ -11,68 +11,83 @@ import requests from pyquery import PyQuery as pq -def map_dict (f, d): - for k,v in d.items(): + +def map_dict(f, d): + for k, v in d.items(): d[k] = f(v) -maintainers_json = subprocess.check_output([ - 'nix-instantiate', '-A', 'lib.maintainers', '--eval', '--strict', '--json' -]) + +maintainers_json = subprocess.check_output( + ["nix-instantiate", "-A", "lib.maintainers", "--eval", "--strict", "--json"] +) maintainers = json.loads(maintainers_json) -MAINTAINERS = map_dict(lambda v: v.get('github', None), maintainers) +MAINTAINERS = map_dict(lambda v: v.get("github", None), maintainers) + def get_response_text(url): return pq(requests.get(url).text) # IO + EVAL_FILE = { - 'nixos': 'nixos/release.nix', - 'nixpkgs': 'pkgs/top-level/release.nix', + "nixos": "nixos/release.nix", + "nixpkgs": "pkgs/top-level/release.nix", } def get_maintainers(attr_name): try: - nixname = attr_name.split('.') - meta_json = subprocess.check_output([ - 'nix-instantiate', - '--eval', - '--strict', - '-A', - '.'.join(nixname[1:]) + '.meta', - EVAL_FILE[nixname[0]], - '--arg', - 'nixpkgs', - './.', 
- '--json']) + nixname = attr_name.split(".") + meta_json = subprocess.check_output( + [ + "nix-instantiate", + "--eval", + "--strict", + "-A", + ".".join(nixname[1:]) + ".meta", + EVAL_FILE[nixname[0]], + "--arg", + "nixpkgs", + "./.", + "--json", + ] + ) meta = json.loads(meta_json) - return meta.get('maintainers', []) + return meta.get("maintainers", []) except: - return [] + return [] + def filter_github_users(maintainers): github_only = [] for i in maintainers: - if i.get('github'): + if i.get("github"): github_only.append(i) return github_only + def print_build(table_row): - a = pq(table_row)('a')[1] - print("- [ ] [{}]({})".format(a.text, a.get('href')), flush=True) + a = pq(table_row)("a")[1] + print("- [ ] [{}]({})".format(a.text, a.get("href")), flush=True) job_maintainers = filter_github_users(get_maintainers(a.text)) if job_maintainers: - print(" - maintainers: {}".format(" ".join(map(lambda u: '@' + u.get('github'), job_maintainers)))) + print( + " - maintainers: {}".format( + " ".join(map(lambda u: "@" + u.get("github"), job_maintainers)) + ) + ) # TODO: print last three persons that touched this file # TODO: pinpoint the diff that broke this build, or maybe it's transient or maybe it never worked? sys.stdout.flush() + @click.command() @click.option( - '--jobset', + "--jobset", default="nixos/release-19.09", - help='Hydra project like nixos/release-19.09') + help="Hydra project like nixos/release-19.09", +) def cli(jobset): """ Given a Hydra project, inspect latest evaluation @@ -82,31 +97,30 @@ def cli(jobset): url = "https://hydra.nixos.org/jobset/{}".format(jobset) # get the last evaluation - click.echo(click.style( - 'Getting latest evaluation for {}'.format(url), fg='green')) + click.echo(click.style("Getting latest evaluation for {}".format(url), fg="green")) d = get_response_text(url) - evaluations = d('#tabs-evaluations').find('a[class="row-link"]') - latest_eval_url = evaluations[0].get('href') + evaluations = d("#tabs-evaluations").find('a[class="row-link"]') + latest_eval_url = evaluations[0].get("href") # parse last evaluation page - click.echo(click.style( - 'Parsing evaluation {}'.format(latest_eval_url), fg='green')) - d = get_response_text(latest_eval_url + '?full=1') + click.echo(click.style("Parsing evaluation {}".format(latest_eval_url), fg="green")) + d = get_response_text(latest_eval_url + "?full=1") # TODO: aborted evaluations # TODO: dependency failed without propagated builds - print('\nFailures:') - for tr in d('img[alt="Failed"]').parents('tr'): + print("\nFailures:") + for tr in d('img[alt="Failed"]').parents("tr"): print_build(tr) - print('\nDependency failures:') - for tr in d('img[alt="Dependency failed"]').parents('tr'): + print("\nDependency failures:") + for tr in d('img[alt="Dependency failed"]').parents("tr"): print_build(tr) - if __name__ == "__main__": try: cli() except Exception as e: - import pdb;pdb.post_mortem() + import pdb + + pdb.post_mortem() diff --git a/maintainers/scripts/kde/collect-metadata.py b/maintainers/scripts/kde/collect-metadata.py index 3a7a3e95508e0..cf04e56446dc0 100755 --- a/maintainers/scripts/kde/collect-metadata.py +++ b/maintainers/scripts/kde/collect-metadata.py @@ -6,6 +6,7 @@ import utils + @click.command @click.argument( "repo-metadata", @@ -25,17 +26,16 @@ writable=True, path_type=pathlib.Path, ), - default=pathlib.Path(__file__).parent.parent.parent.parent -) -@click.option( - "--unstable", - default=False, - is_flag=True + default=pathlib.Path(__file__).parent.parent.parent.parent, ) 
+@click.option("--unstable", default=False, is_flag=True) def main(repo_metadata: pathlib.Path, nixpkgs: pathlib.Path, unstable: bool): - metadata = utils.KDERepoMetadata.from_repo_metadata_checkout(repo_metadata, unstable) + metadata = utils.KDERepoMetadata.from_repo_metadata_checkout( + repo_metadata, unstable + ) out_dir = nixpkgs / "pkgs/kde/generated" metadata.write_json(out_dir) + if __name__ == "__main__": main() # type: ignore diff --git a/maintainers/scripts/kde/collect-missing-deps.py b/maintainers/scripts/kde/collect-missing-deps.py index e3b687cba025c..154c014353693 100755 --- a/maintainers/scripts/kde/collect-missing-deps.py +++ b/maintainers/scripts/kde/collect-missing-deps.py @@ -4,18 +4,18 @@ OK_MISSING = { # we don't use precompiled QML - 'Qt6QuickCompiler', - 'Qt6QmlCompilerPlusPrivate', + "Qt6QuickCompiler", + "Qt6QmlCompilerPlusPrivate", # usually used for version numbers - 'Git', + "Git", # useless by itself, will warn if something else is not found - 'PkgConfig', + "PkgConfig", # license verification - 'ReuseTool', + "ReuseTool", # dev only - 'ClangFormat', + "ClangFormat", # doesn't exist - 'Qt6X11Extras', + "Qt6X11Extras", } OK_MISSING_BY_PACKAGE = { @@ -36,7 +36,7 @@ }, "extra-cmake-modules": { "Sphinx", # only used for docs, bloats closure size - "QCollectionGenerator" + "QCollectionGenerator", }, "gwenview": { "Tiff", # duplicate? @@ -118,9 +118,10 @@ }, "syntax-highlighting": { "XercesC", # only used for extra validation at build time - } + }, } + def main(): here = pathlib.Path(__file__).parent.parent.parent.parent logs = (here / "logs").glob("*.log") @@ -134,9 +135,15 @@ def main(): for line in fd: line = line.strip() if line.startswith("-- No package '"): - package = line.removeprefix("-- No package '").removesuffix("' found") + package = line.removeprefix("-- No package '").removesuffix( + "' found" + ) missing.append(package) - if line == "-- The following OPTIONAL packages have not been found:" or line == "-- The following RECOMMENDED packages have not been found:": + if ( + line == "-- The following OPTIONAL packages have not been found:" + or line + == "-- The following RECOMMENDED packages have not been found:" + ): is_in_block = True elif line.startswith("--") and is_in_block: is_in_block = False @@ -147,7 +154,10 @@ def main(): missing = { package for package in missing - if not any(package.startswith(i) for i in OK_MISSING | OK_MISSING_BY_PACKAGE.get(pname, set())) + if not any( + package.startswith(i) + for i in OK_MISSING | OK_MISSING_BY_PACKAGE.get(pname, set()) + ) } if missing: @@ -156,5 +166,6 @@ def main(): print(" -", line) print() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/maintainers/scripts/kde/generate-sources.py b/maintainers/scripts/kde/generate-sources.py index 2966e7ef96759..2d8cc45104283 100755 --- a/maintainers/scripts/kde/generate-sources.py +++ b/maintainers/scripts/kde/generate-sources.py @@ -17,21 +17,25 @@ import utils -LEAF_TEMPLATE = jinja2.Template(''' +LEAF_TEMPLATE = jinja2.Template( + """ { mkKdeDerivation }: mkKdeDerivation { pname = "{{ pname }}"; } -'''.strip()) +""".strip() +) -ROOT_TEMPLATE = jinja2.Template(''' +ROOT_TEMPLATE = jinja2.Template( + """ { callPackage }: { {%- for p in packages %} {{ p }} = callPackage ./{{ p }} { }; {%- endfor %} } -'''.strip()) +""".strip() +) PROJECTS_WITH_RUST = { "akonadi-search", @@ -39,6 +43,7 @@ "kdepim-addons", } + def to_sri(hash): raw = binascii.unhexlify(hash) b64 = base64.b64encode(raw).decode() @@ -47,15 +52,9 @@ def to_sri(hash): 
@click.command @click.argument( - "pkgset", - type=click.Choice(["frameworks", "gear", "plasma"]), - required=True -) -@click.argument( - "version", - type=str, - required=True + "pkgset", type=click.Choice(["frameworks", "gear", "plasma"]), required=True ) +@click.argument("version", type=str, required=True) @click.option( "--nixpkgs", type=click.Path( @@ -65,7 +64,7 @@ def to_sri(hash): writable=True, path_type=pathlib.Path, ), - default=pathlib.Path(__file__).parent.parent.parent.parent + default=pathlib.Path(__file__).parent.parent.parent.parent, ) @click.option( "--sources-url", @@ -128,13 +127,15 @@ def main(pkgset: str, version: str, nixpkgs: pathlib.Path, sources_url: str | No if existing := results.get(project_name): old_version = existing["version"] if v.parse(old_version) > v.parse(version): - print(f"{project_name} {old_version} is newer than {version}, skipping...") + print( + f"{project_name} {old_version} is newer than {version}, skipping..." + ) continue results[project_name] = { "version": version, "url": "mirror://kde" + urlparse(url).path, - "hash": to_sri(hash) + "hash": to_sri(hash), } pkg_dir = set_dir / project_name @@ -156,15 +157,17 @@ def main(pkgset: str, version: str, nixpkgs: pathlib.Path, sources_url: str | No json.dump(results, fd, indent=2) for project_name in projects_to_update_rust: - print(f"Updating cargoDeps hash for {pkgset}/{project_name}...") - subprocess.run([ + print(f"Updating cargoDeps hash for {pkgset}/{project_name}...") + subprocess.run( + [ "nix-update", f"kdePackages.{project_name}", "--version", "skip", "--override-filename", - pkg_file - ]) + pkg_file, + ] + ) if __name__ == "__main__": diff --git a/maintainers/scripts/kde/utils.py b/maintainers/scripts/kde/utils.py index 14ca61df35543..e2fa609504d85 100644 --- a/maintainers/scripts/kde/utils.py +++ b/maintainers/scripts/kde/utils.py @@ -7,6 +7,7 @@ import yaml + class DataclassEncoder(json.JSONEncoder): def default(self, it): if dataclasses.is_dataclass(it): @@ -31,12 +32,16 @@ def from_yaml(cls, path: pathlib.Path): name=data["identifier"], description=data["description"], project_path=data["projectpath"], - repo_path=data["repopath"] + repo_path=data["repopath"], ) def get_git_commit(path: pathlib.Path): - return subprocess.check_output(["git", "-C", path, "rev-parse", "--short", "HEAD"]).decode().strip() + return ( + subprocess.check_output(["git", "-C", path, "rev-parse", "--short", "HEAD"]) + .decode() + .strip() + ) def validate_unique(projects: list[Project], attr: str): @@ -74,7 +79,7 @@ def validate_unique(projects: list[Project], attr: str): "kdesupport/phonon-mplayer", "kdesupport/phonon-quicktime", "kdesupport/phonon-waveout", - "kdesupport/phonon-xine" + "kdesupport/phonon-xine", } WARNED = set() @@ -157,11 +162,25 @@ def write_json(self, root: pathlib.Path): root.mkdir(parents=True, exist_ok=True) with (root / "projects.json").open("w") as fd: - json.dump(self.projects_by_name, fd, cls=DataclassEncoder, sort_keys=True, indent=2) + json.dump( + self.projects_by_name, + fd, + cls=DataclassEncoder, + sort_keys=True, + indent=2, + ) with (root / "dependencies.json").open("w") as fd: - deps = {k.name: sorted(dep.name for dep in v) for k, v in self.dep_graph.items()} - json.dump({"version": self.version, "dependencies": deps}, fd, cls=DataclassEncoder, sort_keys=True, indent=2) + deps = { + k.name: sorted(dep.name for dep in v) for k, v in self.dep_graph.items() + } + json.dump( + {"version": self.version, "dependencies": deps}, + fd, + cls=DataclassEncoder, + sort_keys=True, + 
indent=2, + ) @classmethod def from_json(cls, root: pathlib.Path): @@ -179,7 +198,9 @@ def from_json(cls, root: pathlib.Path): dep_graph = collections.defaultdict(set) for dependent, dependencies in deps["dependencies"].items(): for dependency in dependencies: - dep_graph[self.projects_by_name[dependent]].add(self.projects_by_name[dependency]) + dep_graph[self.projects_by_name[dependent]].add( + self.projects_by_name[dependency] + ) self.dep_graph = dep_graph return self diff --git a/maintainers/scripts/remove-old-aliases.py b/maintainers/scripts/remove-old-aliases.py index bceee10748443..18506ce222c88 100755 --- a/maintainers/scripts/remove-old-aliases.py +++ b/maintainers/scripts/remove-old-aliases.py @@ -8,6 +8,7 @@ Check this file with mypy after every change! $ mypy --strict maintainers/scripts/remove-old-aliases.py """ + import argparse import shutil import subprocess @@ -141,7 +142,7 @@ def convert(lines: list[str], convert_to: str) -> list[tuple[str, str]]: x.strip() for x in line.split("=", maxsplit=2) ) if after_equal.startswith("warnAlias"): - after_equal = after_equal.split("\"", maxsplit=3)[2].strip() + after_equal = after_equal.split('"', maxsplit=3)[2].strip() except ValueError as err: print(err, line, "\n") lines.remove(line) diff --git a/maintainers/scripts/sha-to-sri.py b/maintainers/scripts/sha-to-sri.py index bb02a84e7cac6..00b3834d1c2e7 100755 --- a/maintainers/scripts/sha-to-sri.py +++ b/maintainers/scripts/sha-to-sri.py @@ -110,7 +110,7 @@ def regex(self): def decode(self, s): from base64 import b64decode - return b64decode(s, validate = True) + return b64decode(s, validate=True) _HASHES = (hashlib.new(n) for n in ("SHA-256", "SHA-512")) @@ -150,7 +150,7 @@ def f(m: re.Match[str]) -> str: except ValueError as exn: logger.error( "Skipping", - exc_info = exn, + exc_info=exn, ) return m.group() @@ -177,10 +177,10 @@ def atomicFileUpdate(target: Path): try: with target.open() as original: with NamedTemporaryFile( - dir = target.parent, - prefix = target.stem, - suffix = target.suffix, - delete = False, + dir=target.parent, + prefix=target.stem, + suffix=target.suffix, + delete=False, mode="w", # otherwise the file would be opened in binary mode by default ) as new: tmpPath = Path(new.name) @@ -189,30 +189,32 @@ def atomicFileUpdate(target: Path): tmpPath.replace(target) except Exception: - tmpPath.unlink(missing_ok = True) + tmpPath.unlink(missing_ok=True) raise def fileToSRI(p: Path): with atomicFileUpdate(p) as (og, new): for i, line in enumerate(og): - with log_context(line = i): + with log_context(line=i): new.write(defToSRI(line)) _SKIP_RE = re.compile("(generated by)|(do not edit)", re.IGNORECASE) -_IGNORE = frozenset({ - "gemset.nix", - "yarn.nix", -}) +_IGNORE = frozenset( + { + "gemset.nix", + "yarn.nix", + } +) if __name__ == "__main__": from sys import argv logger.info("Starting!") - def handleFile(p: Path, skipLevel = logging.INFO): - with log_context(file = str(p)): + def handleFile(p: Path, skipLevel=logging.INFO): + with log_context(file=str(p)): try: with p.open() as f: for line in f: @@ -228,16 +230,16 @@ def handleFile(p: Path, skipLevel = logging.INFO): except Exception as exn: logger.error( "Unhandled exception, skipping file!", - exc_info = exn, + exc_info=exn, ) else: logger.info("Finished processing file") for arg in argv[1:]: p = Path(arg) - with log_context(arg = arg): + with log_context(arg=arg): if p.is_file(): - handleFile(p, skipLevel = logging.WARNING) + handleFile(p, skipLevel=logging.WARNING) elif p.is_dir(): logger.info("Recursing into 
directory") diff --git a/maintainers/scripts/update.py b/maintainers/scripts/update.py index ca35f04d1285f..41dc84bb4f9f2 100644 --- a/maintainers/scripts/update.py +++ b/maintainers/scripts/update.py @@ -248,7 +248,7 @@ async def run_update_script( "nix-shell", nixpkgs_root + "/shell.nix", "--run", - " ".join([ shlex.quote(s) for s in update_script_command ]), + " ".join([shlex.quote(s) for s in update_script_command]), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree, diff --git a/nixos/lib/make-options-doc/mergeJSON.py b/nixos/lib/make-options-doc/mergeJSON.py index f6dd81d0f7807..b05a995619c23 100644 --- a/nixos/lib/make-options-doc/mergeJSON.py +++ b/nixos/lib/make-options-doc/mergeJSON.py @@ -6,42 +6,49 @@ JSON = Dict[str, Any] + class Key: def __init__(self, path: List[str]): self.path = path + def __hash__(self): result = 0 for id in self.path: result ^= hash(id) return result + def __eq__(self, other): return type(self) is type(other) and self.path == other.path -Option = collections.namedtuple('Option', ['name', 'value']) + +Option = collections.namedtuple("Option", ["name", "value"]) + # pivot a dict of options keyed by their display name to a dict keyed by their path def pivot(options: Dict[str, JSON]) -> Dict[Key, Option]: result: Dict[Key, Option] = dict() - for (name, opt) in options.items(): - result[Key(opt['loc'])] = Option(name, opt) + for name, opt in options.items(): + result[Key(opt["loc"])] = Option(name, opt) return result + # pivot back to indexed-by-full-name # like the docbook build we'll just fail if multiple options with differing locs # render to the same option name. def unpivot(options: Dict[Key, Option]) -> Dict[str, JSON]: result: Dict[str, Dict] = dict() - for (key, opt) in options.items(): + for key, opt in options.items(): if opt.name in result: raise RuntimeError( - 'multiple options with colliding ids found', + "multiple options with colliding ids found", opt.name, - result[opt.name]['loc'], - opt.value['loc'], + result[opt.name]["loc"], + opt.value["loc"], ) result[opt.name] = opt.value return result + warningsAreErrors = False optOffset = 0 for arg in sys.argv[1:]: @@ -49,14 +56,14 @@ def unpivot(options: Dict[Key, Option]) -> Dict[str, JSON]: optOffset += 1 warningsAreErrors = True -options = pivot(json.load(open(sys.argv[1 + optOffset], 'r'))) -overrides = pivot(json.load(open(sys.argv[2 + optOffset], 'r'))) +options = pivot(json.load(open(sys.argv[1 + optOffset], "r"))) +overrides = pivot(json.load(open(sys.argv[2 + optOffset], "r"))) # merge both descriptions -for (k, v) in overrides.items(): +for k, v in overrides.items(): cur = options.setdefault(k, v).value - for (ok, ov) in v.value.items(): - if ok == 'declarations': + for ok, ov in v.value.items(): + if ok == "declarations": decls = cur[ok] for d in ov: if d not in decls: @@ -73,26 +80,32 @@ def unpivot(options: Dict[Key, Option]) -> Dict[str, JSON]: # check that every option has a description hasWarnings = False hasErrors = False -for (k, v) in options.items(): - if v.value.get('description', None) is None: +for k, v in options.items(): + if v.value.get("description", None) is None: hasWarnings = True - print(f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", file=sys.stderr) - v.value['description'] = "This option has no description." 
- if v.value.get('type', "unspecified") == "unspecified": + print( + f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", + file=sys.stderr, + ) + v.value["description"] = "This option has no description." + if v.value.get("type", "unspecified") == "unspecified": hasWarnings = True print( - f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " + - "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", file=sys.stderr) + f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " + + "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", + file=sys.stderr, + ) if hasErrors: sys.exit(1) if hasWarnings and warningsAreErrors: print( - "\x1b[1;31m" + - "Treating warnings as errors. Set documentation.nixos.options.warningsAreErrors " + - "to false to ignore these warnings." + - "\x1b[0m", - file=sys.stderr) + "\x1b[1;31m" + + "Treating warnings as errors. Set documentation.nixos.options.warningsAreErrors " + + "to false to ignore these warnings." + + "\x1b[0m", + file=sys.stderr, + ) sys.exit(1) json.dump(unpivot(options), fp=sys.stdout) diff --git a/nixos/modules/image/assert_uki_repart_match.py b/nixos/modules/image/assert_uki_repart_match.py index e0f266cf15bfb..74c9d73eda09a 100644 --- a/nixos/modules/image/assert_uki_repart_match.py +++ b/nixos/modules/image/assert_uki_repart_match.py @@ -19,9 +19,7 @@ def extract_uki_cmdline_params(ukify_json: dict) -> dict[str, str]: params[key] = val if "usrhash" not in params: - print( - f"UKI cmdline does not contain a usrhash:\n{cmdline}" - ) + print(f"UKI cmdline does not contain a usrhash:\n{cmdline}") exit(1) return params @@ -33,17 +31,13 @@ def hashes_match(partition: dict[str, str], expected: str) -> bool: """ if partition.get("roothash") != expected: pretty_part = json.dumps(partition, indent=2) - print( - f"hash mismatch, expected to find roothash {expected} in:\n{pretty_part}" - ) + print(f"hash mismatch, expected to find roothash {expected} in:\n{pretty_part}") return False else: return True -def check_partitions( - partitions: list[dict], uki_params: dict[str, str] -) -> bool: +def check_partitions(partitions: list[dict], uki_params: dict[str, str]) -> bool: """ Checks if the usrhash from `uki_params` has a matching roothash for the corresponding partition in `partitions`. diff --git a/nixos/modules/services/misc/taskserver/helper-tool.py b/nixos/modules/services/misc/taskserver/helper-tool.py index b1eebb07686b2..3e0590d7d7f53 100644 --- a/nixos/modules/services/misc/taskserver/helper-tool.py +++ b/nixos/modules/services/misc/taskserver/helper-tool.py @@ -13,7 +13,7 @@ import click -IS_AUTO_CONFIG = @isAutoConfig@ # NOQA +IS_AUTO_CONFIG = "@isAutoConfig@" == "True" CERTTOOL_COMMAND = "@certtool@" CERT_BITS = "@certBits@" CLIENT_EXPIRATION = "@clientExpiration@" @@ -29,15 +29,15 @@ CA_CERT = os.path.join(TASKD_DATA_DIR, "keys", "ca.cert") CRL_FILE = os.path.join(TASKD_DATA_DIR, "keys", "server.crl") -RE_CONFIGUSER = re.compile(r'^\s*user\s*=(.*)$') -RE_USERKEY = re.compile(r'New user key: (.+)$', re.MULTILINE) +RE_CONFIGUSER = re.compile(r"^\s*user\s*=(.*)$") +RE_USERKEY = re.compile(r"New user key: (.+)$", re.MULTILINE) def lazyprop(fun): """ Decorator which only evaluates the specified function when accessed. 
""" - name = '_lazy_' + fun.__name__ + name = "_lazy_" + fun.__name__ @property def _lazy(self): @@ -65,6 +65,7 @@ def run_as_taskd_group(): gid = grp.getgrnam(TASKD_GROUP).gr_gid os.setgid(gid) + def taskd_cmd(cmd, *args, **kwargs): """ Invoke taskd with the specified command with the privileges of the 'taskd' @@ -78,7 +79,7 @@ def taskd_cmd(cmd, *args, **kwargs): return fun( [TASKD_COMMAND, cmd, "--data", TASKD_DATA_DIR] + list(args), preexec_fn=run_as_taskd_user, - **kwargs + **kwargs, ) @@ -96,7 +97,7 @@ def certtool_cmd(*args, **kwargs): [CERTTOOL_COMMAND] + list(args), preexec_fn=run_as_taskd_group, stderr=subprocess.STDOUT, - **kwargs + **kwargs, ) @@ -115,7 +116,7 @@ def mark_imperative(*path): file called ".imperative", so that it doesn't interfere with the declarative configuration. """ - open(os.path.join(mkpath(*path), ".imperative"), 'a').close() + open(os.path.join(mkpath(*path), ".imperative"), "a").close() def is_imperative(*path): @@ -155,14 +156,16 @@ def create_template(contents): def generate_key(org, user): if not IS_AUTO_CONFIG: - msg = "Automatic PKI handling is disabled, you need to " \ - "manually issue a client certificate for user {}.\n" + msg = ( + "Automatic PKI handling is disabled, you need to " + "manually issue a client certificate for user {}.\n" + ) sys.stderr.write(msg.format(user)) return - keysdir = os.path.join(TASKD_DATA_DIR, "keys" ) - orgdir = os.path.join(keysdir , org ) - userdir = os.path.join(orgdir , user ) + keysdir = os.path.join(TASKD_DATA_DIR, "keys") + orgdir = os.path.join(keysdir, org) + userdir = os.path.join(orgdir, user) if os.path.exists(userdir): raise OSError("Keyfile directory for {} already exists.".format(user)) @@ -194,17 +197,22 @@ def generate_key(org, user): "expiration_days = {}".format(CLIENT_EXPIRATION), "tls_www_client", "encryption_key", - "signing_key" + "signing_key", ] with create_template(template_data) as template: certtool_cmd( "-c", - "--load-privkey", privkey, - "--load-ca-privkey", CA_KEY, - "--load-ca-certificate", CA_CERT, - "--template", template, - "--outfile", pubcert + "--load-privkey", + privkey, + "--load-ca-privkey", + CA_KEY, + "--load-ca-certificate", + CA_CERT, + "--template", + template, + "--outfile", + pubcert, ) except: rmtree(userdir) @@ -226,12 +234,18 @@ def revoke_key(org, user): oldcrl.flush() certtool_cmd( "--generate-crl", - "--load-crl", oldcrl.name, - "--load-ca-privkey", CA_KEY, - "--load-ca-certificate", CA_CERT, - "--load-certificate", pubcert, - "--template", template, - "--outfile", CRL_FILE + "--load-crl", + oldcrl.name, + "--load-ca-privkey", + CA_KEY, + "--load-ca-certificate", + CA_CERT, + "--load-certificate", + pubcert, + "--template", + template, + "--outfile", + CRL_FILE, ) oldcrl.close() rmtree(basedir) @@ -253,7 +267,7 @@ def getkey(*args): buf.append(line) if is_key_line(line, "END"): - return ''.join(buf) + return "".join(buf) raise IOError("Unable to get key from {}.".format(path)) @@ -270,7 +284,7 @@ def __init__(self, org, name, key): self.key = key def export(self): - credentials = '/'.join([self.__org, self.name, self.key]) + credentials = "/".join([self.__org, self.name, self.key]) allow_unquoted = string.ascii_letters + string.digits + "/-_." 
if not all((c in allow_unquoted) for c in credentials): credentials = "'" + credentials.replace("'", r"'\''") + "'" @@ -287,15 +301,12 @@ def export(self): script += [ "umask 0077", 'mkdir -p "{}"'.format(keydir), - mktaskkey("certificate", os.path.join(keydir, "public.cert"), - pubcert), + mktaskkey("certificate", os.path.join(keydir, "public.cert"), pubcert), mktaskkey("key", os.path.join(keydir, "private.key"), privkey), - mktaskkey("ca", os.path.join(keydir, "ca.cert"), cacert) + mktaskkey("ca", os.path.join(keydir, "ca.cert"), cacert), ] - script.append( - "task config taskd.credentials -- {}".format(credentials) - ) + script.append("task config taskd.credentials -- {}".format(credentials)) return "\n".join(script) + "\n" @@ -320,8 +331,9 @@ def add_user(self, name): if self.ignore_imperative and is_imperative(self.name): return None if name not in self.users.keys(): - output = taskd_cmd("add", "user", self.name, name, - capture_stdout=True, encoding='utf-8') + output = taskd_cmd( + "add", "user", self.name, name, capture_stdout=True, encoding="utf-8" + ) key = RE_USERKEY.search(output) if key is None: msg = "Unable to find key while creating user {}." @@ -339,8 +351,7 @@ def del_user(self, name): """ if name in self.users.keys(): user = self.get_user(name) - if self.ignore_imperative and \ - is_imperative(self.name, "users", user.key): + if self.ignore_imperative and is_imperative(self.name, "users", user.key): return # Work around https://bug.tasktools.org/browse/TD-40: @@ -369,8 +380,7 @@ def del_group(self, name): Delete a group. """ if name in self.users.keys(): - if self.ignore_imperative and \ - is_imperative(self.name, "groups", name): + if self.ignore_imperative and is_imperative(self.name, "groups", name): return taskd_cmd("remove", "group", self.name, name) del self._lazy_groups[name] @@ -451,7 +461,7 @@ def orgs(self): class OrganisationType(click.ParamType): - name = 'organisation' + name = "organisation" def convert(self, value, param, ctx): org = Manager().get_org(value) @@ -459,6 +469,7 @@ def convert(self, value, param, ctx): self.fail("Organisation {} does not exist.".format(value)) return org + ORGANISATION = OrganisationType() @@ -590,9 +601,11 @@ def del_org(name): will be revoked. """ Manager().del_org(name) - msg = ("Organisation {} deleted. Be sure to restart the Taskserver" - " using 'systemctl restart taskserver.service' in order for" - " the certificate revocation to apply.") + msg = ( + "Organisation {} deleted. Be sure to restart the Taskserver" + " using 'systemctl restart taskserver.service' in order for" + " the certificate revocation to apply." + ) click.echo(msg.format(name), err=True) @@ -625,9 +638,11 @@ def del_user(organisation, user): This will also revoke the client certificate of the given user. """ organisation.del_user(user) - msg = ("User {} deleted. Be sure to restart the Taskserver using" - " 'systemctl restart taskserver.service' in order for the" - " certificate revocation to apply.") + msg = ( + "User {} deleted. Be sure to restart the Taskserver using" + " 'systemctl restart taskserver.service' in order for the" + " certificate revocation to apply." + ) click.echo(msg.format(user), err=True) @@ -678,7 +693,7 @@ def add_or_delete(old, new, add_fun, del_fun): @cli.command("process-json") -@click.argument('json-file', type=click.File('rb')) +@click.argument("json-file", type=click.File("rb")) def process_json(json_file): """ Create and delete users, groups and organisations based on a JSON file. 
@@ -698,11 +713,13 @@ def process_json(json_file): for org in mgr.orgs.values(): if is_imperative(org.name): continue - add_or_delete(org.users.keys(), data[org.name]['users'], - org.add_user, org.del_user) - add_or_delete(org.groups.keys(), data[org.name]['groups'], - org.add_group, org.del_group) + add_or_delete( + org.users.keys(), data[org.name]["users"], org.add_user, org.del_user + ) + add_or_delete( + org.groups.keys(), data[org.name]["groups"], org.add_group, org.del_group + ) -if __name__ == '__main__': +if __name__ == "__main__": cli() diff --git a/nixos/modules/system/boot/loader/limine/limine-install.py b/nixos/modules/system/boot/loader/limine/limine-install.py index a64f34eaefc30..e14752ece505f 100644 --- a/nixos/modules/system/boot/loader/limine/limine-install.py +++ b/nixos/modules/system/boot/loader/limine/limine-install.py @@ -16,6 +16,7 @@ import tempfile import textwrap + @dataclass class XenBootSpec: """Represent the bootspec extension for Xen dom0 kernels""" @@ -25,6 +26,7 @@ class XenBootSpec: params: List[str] version: str + @dataclass class BootSpec: system: str @@ -38,13 +40,15 @@ class BootSpec: initrd: str | None = None initrdSecrets: str | None = None -install_config = json.load(open('@configPath@', 'r')) + +install_config = json.load(open("@configPath@", "r")) libc = CDLL("libc.so.6") limine_install_dir: Optional[str] = None can_use_direct_paths = False paths: Dict[str, bool] = {} + def config(*path: str) -> Optional[Any]: result = install_config for component in path: @@ -53,60 +57,69 @@ def config(*path: str) -> Optional[Any]: def bool_to_yes_no(value: bool) -> str: - return 'yes' if value else 'no' + return "yes" if value else "no" -def get_system_path(profile: str = 'system', gen: Optional[str] = None, spec: Optional[str] = None) -> str: - basename = f'{profile}-{gen}-link' if gen is not None else profile - profiles_dir = '/nix/var/nix/profiles' - if profile == 'system': +def get_system_path( + profile: str = "system", gen: Optional[str] = None, spec: Optional[str] = None +) -> str: + basename = f"{profile}-{gen}-link" if gen is not None else profile + profiles_dir = "/nix/var/nix/profiles" + if profile == "system": result = os.path.join(profiles_dir, basename) else: - result = os.path.join(profiles_dir, 'system-profiles', basename) + result = os.path.join(profiles_dir, "system-profiles", basename) if spec is not None: - result = os.path.join(result, 'specialisation', spec) + result = os.path.join(result, "specialisation", spec) return result def get_profiles() -> List[str]: - profiles_dir = '/nix/var/nix/profiles/system-profiles/' + profiles_dir = "/nix/var/nix/profiles/system-profiles/" dirs = os.listdir(profiles_dir) if os.path.isdir(profiles_dir) else [] - return [path for path in dirs if not path.endswith('-link')] - - -def get_gens(profile: str = 'system') -> List[Tuple[int, List[str]]]: - nix_env = os.path.join(str(config('nixPath')), 'bin', 'nix-env') - output = subprocess.check_output([ - nix_env, '--list-generations', - '-p', get_system_path(profile), - '--option', 'build-users-group', '', - ], universal_newlines=True) + return [path for path in dirs if not path.endswith("-link")] + + +def get_gens(profile: str = "system") -> List[Tuple[int, List[str]]]: + nix_env = os.path.join(str(config("nixPath")), "bin", "nix-env") + output = subprocess.check_output( + [ + nix_env, + "--list-generations", + "-p", + get_system_path(profile), + "--option", + "build-users-group", + "", + ], + universal_newlines=True, + ) gen_lines = output.splitlines() gen_nums 
= [int(line.split()[0]) for line in gen_lines] - return [gen for gen in gen_nums][-config('maxGenerations'):] + return [gen for gen in gen_nums][-config("maxGenerations") :] def is_encrypted(device: str) -> bool: - for name in config('luksDevices'): - if os.readlink(os.path.join('/dev/mapper', name)) == os.readlink(device): + for name in config("luksDevices"): + if os.readlink(os.path.join("/dev/mapper", name)) == os.readlink(device): return True return False def is_fs_type_supported(fs_type: str) -> bool: - return fs_type.startswith('vfat') + return fs_type.startswith("vfat") def get_dest_file(path: str) -> str: package_id = os.path.basename(os.path.dirname(path)) suffix = os.path.basename(path) - return f'{package_id}-{suffix}' + return f"{package_id}-{suffix}" def get_dest_path(path: str, target: str) -> str: @@ -115,7 +128,7 @@ def get_dest_path(path: str, target: str) -> str: def get_copied_path_uri(path: str, target: str) -> str: - result = '' + result = "" dest_file = get_dest_file(path) dest_path = get_dest_path(path, target) @@ -125,15 +138,15 @@ def get_copied_path_uri(path: str, target: str) -> str: else: paths[dest_path] = True - path_with_prefix = os.path.join('/limine', target, dest_file) - result = f'boot():{path_with_prefix}' + path_with_prefix = os.path.join("/limine", target, dest_file) + result = f"boot():{path_with_prefix}" - if config('validateChecksums'): - with open(path, 'rb') as file: + if config("validateChecksums"): + with open(path, "rb") as file: b2sum = hashlib.blake2b() b2sum.update(file.read()) - result += f'#{b2sum.hexdigest()}' + result += f"#{b2sum.hexdigest()}" return result @@ -142,7 +155,9 @@ def get_path_uri(path: str) -> str: return get_copied_path_uri(path, "") -def get_file_uri(profile: str, gen: Optional[str], spec: Optional[str], name: str) -> str: +def get_file_uri( + profile: str, gen: Optional[str], spec: Optional[str], name: str +) -> str: gen_path = get_system_path(profile, gen, spec) path_in_store = os.path.realpath(os.path.join(gen_path, name)) return get_path_uri(path_in_store) @@ -151,22 +166,21 @@ def get_file_uri(profile: str, gen: Optional[str], spec: Optional[str], name: st def get_kernel_uri(kernel_path: str) -> str: return get_copied_path_uri(kernel_path, "kernels") + def bootjson_to_bootspec(bootjson: dict) -> BootSpec: - specialisations = bootjson['org.nixos.specialisation.v1'] + specialisations = bootjson["org.nixos.specialisation.v1"] specialisations = {k: bootjson_to_bootspec(v) for k, v in specialisations.items()} xen = None - if 'org.xenproject.bootspec.v2' in bootjson: - xen = bootjson['org.xenproject.bootspec.v2'] + if "org.xenproject.bootspec.v2" in bootjson: + xen = bootjson["org.xenproject.bootspec.v2"] return BootSpec( - **bootjson['org.nixos.bootspec.v1'], + **bootjson["org.nixos.bootspec.v1"], specialisations=specialisations, xen=xen, ) -def generate_xen_efi_files( - bootspec: BootSpec, - gen: str - ) -> str: + +def generate_xen_efi_files(bootspec: BootSpec, gen: str) -> str: """Generate a Xen EFI xen.cfg file, and copy required files in place. 
Assumes the bootspec has already been validated as having the requried @@ -179,43 +193,43 @@ def generate_xen_efi_files( Returns the path to the Xen EFI binary """ - xen_efi_boot_path = get_copied_path_uri(bootspec.xen['efiPath'], f'xen/{gen}') - xen_efi_path = get_dest_path(bootspec.xen['efiPath'], f'xen/{gen}') + xen_efi_boot_path = get_copied_path_uri(bootspec.xen["efiPath"], f"xen/{gen}") + xen_efi_path = get_dest_path(bootspec.xen["efiPath"], f"xen/{gen}") xen_efi_cfg_dir = os.path.dirname(xen_efi_path) - xen_efi_cfg_path = xen_efi_path[:-4] + '.cfg' + xen_efi_cfg_path = xen_efi_path[:-4] + ".cfg" if not os.path.exists(xen_efi_cfg_dir): os.makedirs(xen_efi_cfg_dir) - xen_efi_cfg = ( - f'default=nixos{gen}\n\n' + - f'[nixos{gen}]\n' - ) + xen_efi_cfg = f"default=nixos{gen}\n\n" + f"[nixos{gen}]\n" # set xen dom0 parameters - if 'params' in bootspec.xen and len(bootspec.xen['params']) > 0: - xen_efi_cfg += 'options=' + ' '.join(bootspec.xen['params']).strip() + '\n' + if "params" in bootspec.xen and len(bootspec.xen["params"]) > 0: + xen_efi_cfg += "options=" + " ".join(bootspec.xen["params"]).strip() + "\n" # set kernel and copy in-place - xen_efi_kernel_path = get_dest_path(bootspec.kernel, f'xen/{gen}') + xen_efi_kernel_path = get_dest_path(bootspec.kernel, f"xen/{gen}") copy_file(bootspec.kernel, xen_efi_kernel_path) xen_efi_cfg += ( - 'kernel=' + os.path.basename(xen_efi_kernel_path) + ' ' - + ' '.join(['init=' + bootspec.init] + bootspec.kernelParams).strip() - + '\n' + "kernel=" + + os.path.basename(xen_efi_kernel_path) + + " " + + " ".join(["init=" + bootspec.init] + bootspec.kernelParams).strip() + + "\n" ) # set ramdisk and copy initrd in-place if bootspec.initrd: - xen_efi_initrd_path = get_dest_path(bootspec.initrd, f'xen/{gen}') + xen_efi_initrd_path = get_dest_path(bootspec.initrd, f"xen/{gen}") copy_file(bootspec.initrd, xen_efi_initrd_path) - xen_efi_cfg += 'ramdisk=' + os.path.basename(xen_efi_initrd_path) + '\n' + xen_efi_cfg += "ramdisk=" + os.path.basename(xen_efi_initrd_path) + "\n" - with open(xen_efi_cfg_path, 'w') as xen_efi_cfg_file: + with open(xen_efi_cfg_path, "w") as xen_efi_cfg_file: xen_efi_cfg_file.write(xen_efi_cfg) return xen_efi_boot_path + def xen_config_entry( levels: int, bootspec: BootSpec, xenVersion: str, gen: str, time: str, efi: bool ) -> str: @@ -230,15 +244,19 @@ def xen_config_entry( efi -- True if EFI protocol should be used for this entry """ # generate Xen menu label for the current generation - entry = '/' * levels + f'Generation {gen} with Xen {xenVersion}' + (' EFI\n' if efi else '\n') - entry += f'comment: Xen {xenVersion} {bootspec.label}, built on {time}\n' + entry = ( + "/" * levels + + f"Generation {gen} with Xen {xenVersion}" + + (" EFI\n" if efi else "\n") + ) + entry += f"comment: Xen {xenVersion} {bootspec.label}, built on {time}\n" # load Xen dom0 as the executable, using multiboot for EFI & BIOS if ( - efi and - 'multibootPath' in bootspec.xen and - len(bootspec.xen['multibootPath']) > 0 and - os.path.exists(bootspec.xen['multibootPath']) - ): + efi + and "multibootPath" in bootspec.xen + and len(bootspec.xen["multibootPath"]) > 0 + and os.path.exists(bootspec.xen["multibootPath"]) + ): # Use the EFI protocol and generate Xen EFI configuration # files and directories which are loaded by Xen's EFI binary # directly. @@ -247,78 +265,94 @@ def xen_config_entry( # an entry-point in Xen's multiboot binary, and multiboot1 # doesn't work under EFI. 
# Upstream Limine issue #482 - entry += 'protocol: efi\n' - entry += ( - 'path: ' + generate_xen_efi_files(bootspec, gen) + '\n' - ) + entry += "protocol: efi\n" + entry += "path: " + generate_xen_efi_files(bootspec, gen) + "\n" elif ( - 'multibootPath' in bootspec.xen and - len(bootspec.xen['multibootPath']) > 0 and - os.path.exists(bootspec.xen['multibootPath']) - ): + "multibootPath" in bootspec.xen + and len(bootspec.xen["multibootPath"]) > 0 + and os.path.exists(bootspec.xen["multibootPath"]) + ): # Use multiboot1 if not generating an EFI entry, as multiboot2 # doesn't work under Limine for booting Xen. # Upstream Limine issue #483 - entry += 'protocol: multiboot\n' + entry += "protocol: multiboot\n" entry += ( - 'path: ' + get_copied_path_uri(bootspec.xen['multibootPath'], f'xen/{gen}') + '\n' + "path: " + + get_copied_path_uri(bootspec.xen["multibootPath"], f"xen/{gen}") + + "\n" ) # set params as the multiboot executable's parameters - if 'params' in bootspec.xen and len(bootspec.xen['params']) > 0: + if "params" in bootspec.xen and len(bootspec.xen["params"]) > 0: # TODO: Understand why the first argument is ignored below? # --- to work around first argument being ignored - entry += ( - 'cmdline: -- ' + ' '.join(bootspec.xen['params']).strip() + '\n' - ) + entry += "cmdline: -- " + " ".join(bootspec.xen["params"]).strip() + "\n" # load the linux kernel as the second module - entry += 'module_path: ' + get_kernel_uri(bootspec.kernel) + '\n' + entry += "module_path: " + get_kernel_uri(bootspec.kernel) + "\n" # set kernel parameters as the parameters to the first module # TODO: Understand why the first argument is ignored below? # --- to work around first argument being ignored entry += ( - 'module_string: -- ' - + ' '.join(['init=' + bootspec.init] + bootspec.kernelParams).strip() - + '\n' + "module_string: -- " + + " ".join(["init=" + bootspec.init] + bootspec.kernelParams).strip() + + "\n" ) if bootspec.initrd: # the final module is the initrd - entry += 'module_path: ' + get_kernel_uri(bootspec.initrd) + '\n' + entry += "module_path: " + get_kernel_uri(bootspec.initrd) + "\n" return entry + def config_entry(levels: int, bootspec: BootSpec, label: str, time: str) -> str: - entry = '/' * levels + label + '\n' - entry += 'protocol: linux\n' - entry += f'comment: {bootspec.label}, built on {time}\n' - entry += 'kernel_path: ' + get_kernel_uri(bootspec.kernel) + '\n' - entry += 'cmdline: ' + ' '.join(['init=' + bootspec.init] + bootspec.kernelParams).strip() + '\n' + entry = "/" * levels + label + "\n" + entry += "protocol: linux\n" + entry += f"comment: {bootspec.label}, built on {time}\n" + entry += "kernel_path: " + get_kernel_uri(bootspec.kernel) + "\n" + entry += ( + "cmdline: " + + " ".join(["init=" + bootspec.init] + bootspec.kernelParams).strip() + + "\n" + ) if bootspec.initrd: - entry += f'module_path: ' + get_kernel_uri(bootspec.initrd) + '\n' + entry += f"module_path: " + get_kernel_uri(bootspec.initrd) + "\n" if bootspec.initrdSecrets: - base_path = str(limine_install_dir) + '/kernels/' - initrd_secrets_path = base_path + os.path.basename(bootspec.toplevel) + '-secrets' + base_path = str(limine_install_dir) + "/kernels/" + initrd_secrets_path = ( + base_path + os.path.basename(bootspec.toplevel) + "-secrets" + ) if not os.path.exists(base_path): os.makedirs(base_path) old_umask = os.umask(0o137) - initrd_secrets_path_temp = tempfile.mktemp(os.path.basename(bootspec.toplevel) + '-secrets') + initrd_secrets_path_temp = tempfile.mktemp( + os.path.basename(bootspec.toplevel) 
+ "-secrets" + ) if os.system(bootspec.initrdSecrets + " " + initrd_secrets_path_temp) != 0: - print(f'warning: failed to create initrd secrets for "{label}"', file=sys.stderr) - print(f'note: if this is an older generation there is nothing to worry about') + print( + f'warning: failed to create initrd secrets for "{label}"', + file=sys.stderr, + ) + print( + f"note: if this is an older generation there is nothing to worry about" + ) if os.path.exists(initrd_secrets_path_temp): copy_file(initrd_secrets_path_temp, initrd_secrets_path) os.unlink(initrd_secrets_path_temp) - entry += 'module_path: ' + get_kernel_uri(initrd_secrets_path) + '\n' + entry += "module_path: " + get_kernel_uri(initrd_secrets_path) + "\n" os.umask(old_umask) return entry def generate_config_entry(profile: str, gen: str, special: bool) -> str: - time = datetime.datetime.fromtimestamp(os.stat(get_system_path(profile,gen), follow_symlinks=False).st_mtime).strftime("%F %H:%M:%S") - boot_json = json.load(open(os.path.join(get_system_path(profile, gen), 'boot.json'), 'r')) + time = datetime.datetime.fromtimestamp( + os.stat(get_system_path(profile, gen), follow_symlinks=False).st_mtime + ).strftime("%F %H:%M:%S") + boot_json = json.load( + open(os.path.join(get_system_path(profile, gen), "boot.json"), "r") + ) boot_spec = bootjson_to_bootspec(boot_json) specialisation_list = boot_spec.specialisations.items() @@ -326,37 +360,37 @@ def generate_config_entry(profile: str, gen: str, special: bool) -> str: entry = "" # Xen, if configured, should be listed first for each generation - if boot_spec.xen and 'version' in boot_spec.xen: - xen_version = boot_spec.xen['version'] - if config('efiSupport'): + if boot_spec.xen and "version" in boot_spec.xen: + xen_version = boot_spec.xen["version"] + if config("efiSupport"): entry += xen_config_entry(2, boot_spec, xen_version, gen, time, True) entry += xen_config_entry(2, boot_spec, xen_version, gen, time, False) if len(specialisation_list) > 0: depth += 1 - entry += '/' * (depth-1) + entry += "/" * (depth - 1) if special: - entry += '+' + entry += "+" - entry += f'Generation {gen}' + '\n' - entry += config_entry(depth, boot_spec, f'Default', str(time)) + entry += f"Generation {gen}" + "\n" + entry += config_entry(depth, boot_spec, f"Default", str(time)) else: - entry += config_entry(depth, boot_spec, f'Generation {gen}', str(time)) + entry += config_entry(depth, boot_spec, f"Generation {gen}", str(time)) for spec, spec_boot_spec in specialisation_list: - entry += config_entry(depth, spec_boot_spec, f'{spec}', str(time)) + entry += config_entry(depth, spec_boot_spec, f"{spec}", str(time)) return entry def find_disk_device(part: str) -> str: part = os.path.realpath(part) - part = part.removeprefix('/dev/') - disk = os.path.realpath(os.path.join('/sys', 'class', 'block', part)) + part = part.removeprefix("/dev/") + disk = os.path.realpath(os.path.join("/sys", "class", "block", part)) disk = os.path.dirname(disk) - return os.path.join('/dev', os.path.basename(disk)) + return os.path.join("/dev", os.path.basename(disk)) def find_mounted_device(path: str) -> str: @@ -397,34 +431,48 @@ def install_bootloader() -> None: boot_fs = None - for mount_point, fs in config('fileSystems').items(): - if mount_point == '/boot': + for mount_point, fs in config("fileSystems").items(): + if mount_point == "/boot": boot_fs = fs - if config('efiSupport'): - limine_install_dir = os.path.join(str(config('efiMountPoint')), 'limine') - elif boot_fs and is_fs_type_supported(boot_fs['fsType']) and not 
is_encrypted(boot_fs['device']): - limine_install_dir = '/boot/limine' + if config("efiSupport"): + limine_install_dir = os.path.join(str(config("efiMountPoint")), "limine") + elif ( + boot_fs + and is_fs_type_supported(boot_fs["fsType"]) + and not is_encrypted(boot_fs["device"]) + ): + limine_install_dir = "/boot/limine" else: possible_causes = [] if not boot_fs: - possible_causes.append(f'/limine on the boot partition (not present)') + possible_causes.append(f"/limine on the boot partition (not present)") else: - is_boot_fs_type_ok = is_fs_type_supported(boot_fs['fsType']) - is_boot_fs_encrypted = is_encrypted(boot_fs['device']) - possible_causes.append(f'/limine on the boot partition ({is_boot_fs_type_ok=} {is_boot_fs_encrypted=})') + is_boot_fs_type_ok = is_fs_type_supported(boot_fs["fsType"]) + is_boot_fs_encrypted = is_encrypted(boot_fs["device"]) + possible_causes.append( + f"/limine on the boot partition ({is_boot_fs_type_ok=} {is_boot_fs_encrypted=})" + ) - causes_str = textwrap.indent('\n'.join(possible_causes), ' - ') + causes_str = textwrap.indent("\n".join(possible_causes), " - ") - raise Exception(textwrap.dedent(''' + raise Exception( + textwrap.dedent(""" Could not find a valid place for Limine configuration files!' Possible candidates that were ruled out: - ''') + causes_str + textwrap.dedent(''' + """) + + causes_str + + textwrap.dedent(""" Limine cannot be installed on a system without an unencrypted partition formatted as FAT. - ''')) + """) + ) - if config('secureBoot', 'enable') and not config('secureBoot', 'createAndEnrollKeys') and not os.path.exists("/var/lib/sbctl"): + if ( + config("secureBoot", "enable") + and not config("secureBoot", "createAndEnrollKeys") + and not os.path.exists("/var/lib/sbctl") + ): print("There are no sbctl secure boot keys present. 
Please generate some.") sys.exit(1) @@ -435,63 +483,97 @@ def install_bootloader() -> None: for file in files: paths[os.path.join(dir, file)] = False - limine_xen_dir = os.path.join(limine_install_dir, 'xen') + limine_xen_dir = os.path.join(limine_install_dir, "xen") if os.path.exists(limine_xen_dir): - print(f'cleaning {limine_xen_dir}') + print(f"cleaning {limine_xen_dir}") shutil.rmtree(limine_xen_dir) os.makedirs(limine_xen_dir) - profiles = [('system', get_gens())] + profiles = [("system", get_gens())] for profile in get_profiles(): profiles += [(profile, get_gens(profile))] - timeout = config('timeout') - editor_enabled = bool_to_yes_no(config('enableEditor')) - hash_mismatch_panic = bool_to_yes_no(config('panicOnChecksumMismatch')) + timeout = config("timeout") + editor_enabled = bool_to_yes_no(config("enableEditor")) + hash_mismatch_panic = bool_to_yes_no(config("panicOnChecksumMismatch")) last_gen = get_gens()[-1] - last_gen_json = json.load(open(os.path.join(get_system_path('system', last_gen), 'boot.json'), 'r')) + last_gen_json = json.load( + open(os.path.join(get_system_path("system", last_gen), "boot.json"), "r") + ) last_gen_boot_spec = bootjson_to_bootspec(last_gen_json) - config_file = str(config('extraConfig')) + '\n' - config_file += textwrap.dedent(f''' + config_file = str(config("extraConfig")) + "\n" + config_file += textwrap.dedent(f""" timeout: {timeout} editor_enabled: {editor_enabled} hash_mismatch_panic: {hash_mismatch_panic} graphics: yes default_entry: {3 if len(last_gen_boot_spec.specialisations.items()) > 0 else 2} - ''') - - for wallpaper in config('style', 'wallpapers'): - config_file += f'''wallpaper: {get_copied_path_uri(wallpaper, 'wallpapers')}\n''' - - config_file += option_from_config('wallpaper_style', ['style', 'wallpaperStyle']) - config_file += option_from_config('backdrop', ['style', 'backdrop']) - - config_file += option_from_config('interface_resolution', ['style', 'interface', 'resolution']) - config_file += option_from_config('interface_branding', ['style', 'interface', 'branding']) - config_file += option_from_config('interface_branding_colour', ['style', 'interface', 'brandingColor']) - config_file += option_from_config('interface_help_hidden', ['style', 'interface', 'helpHidden']) - config_file += option_from_config('term_font_scale', ['style', 'graphicalTerminal', 'font', 'scale']) - config_file += option_from_config('term_font_spacing', ['style', 'graphicalTerminal', 'font', 'spacing']) - config_file += option_from_config('term_palette', ['style', 'graphicalTerminal', 'palette']) - config_file += option_from_config('term_palette_bright', ['style', 'graphicalTerminal', 'brightPalette']) - config_file += option_from_config('term_foreground', ['style', 'graphicalTerminal', 'foreground']) - config_file += option_from_config('term_background', ['style', 'graphicalTerminal', 'background']) - config_file += option_from_config('term_foreground_bright', ['style', 'graphicalTerminal', 'brightForeground']) - config_file += option_from_config('term_background_bright', ['style', 'graphicalTerminal', 'brightBackground']) - config_file += option_from_config('term_margin', ['style', 'graphicalTerminal', 'margin']) - config_file += option_from_config('term_margin_gradient', ['style', 'graphicalTerminal', 'marginGradient']) - - config_file += textwrap.dedent(''' + """) + + for wallpaper in config("style", "wallpapers"): + config_file += ( + f"""wallpaper: {get_copied_path_uri(wallpaper, "wallpapers")}\n""" + ) + + config_file += 
option_from_config("wallpaper_style", ["style", "wallpaperStyle"]) + config_file += option_from_config("backdrop", ["style", "backdrop"]) + + config_file += option_from_config( + "interface_resolution", ["style", "interface", "resolution"] + ) + config_file += option_from_config( + "interface_branding", ["style", "interface", "branding"] + ) + config_file += option_from_config( + "interface_branding_colour", ["style", "interface", "brandingColor"] + ) + config_file += option_from_config( + "interface_help_hidden", ["style", "interface", "helpHidden"] + ) + config_file += option_from_config( + "term_font_scale", ["style", "graphicalTerminal", "font", "scale"] + ) + config_file += option_from_config( + "term_font_spacing", ["style", "graphicalTerminal", "font", "spacing"] + ) + config_file += option_from_config( + "term_palette", ["style", "graphicalTerminal", "palette"] + ) + config_file += option_from_config( + "term_palette_bright", ["style", "graphicalTerminal", "brightPalette"] + ) + config_file += option_from_config( + "term_foreground", ["style", "graphicalTerminal", "foreground"] + ) + config_file += option_from_config( + "term_background", ["style", "graphicalTerminal", "background"] + ) + config_file += option_from_config( + "term_foreground_bright", ["style", "graphicalTerminal", "brightForeground"] + ) + config_file += option_from_config( + "term_background_bright", ["style", "graphicalTerminal", "brightBackground"] + ) + config_file += option_from_config( + "term_margin", ["style", "graphicalTerminal", "margin"] + ) + config_file += option_from_config( + "term_margin_gradient", ["style", "graphicalTerminal", "marginGradient"] + ) + + config_file += textwrap.dedent(""" # NixOS boot entries start here - ''') + """) - for (profile, gens) in profiles: - group_name = 'default profile' if profile == 'system' else f"profile '{profile}'" - config_file += f'/+NixOS {group_name}\n' + for profile, gens in profiles: + group_name = ( + "default profile" if profile == "system" else f"profile '{profile}'" + ) + config_file += f"/+NixOS {group_name}\n" isFirst = True @@ -499,12 +581,12 @@ def install_bootloader() -> None: config_file += generate_config_entry(profile, gen, isFirst) isFirst = False - config_file_path = os.path.join(limine_install_dir, 'limine.conf') - config_file += '\n# NixOS boot entries end here\n\n' + config_file_path = os.path.join(limine_install_dir, "limine.conf") + config_file += "\n# NixOS boot entries end here\n\n" - config_file += str(config('extraEntries')) + config_file += str(config("extraEntries")) - with open(f"{config_file_path}.tmp", 'w') as file: + with open(f"{config_file_path}.tmp", "w") as file: file.truncate() file.write(config_file.strip()) file.flush() @@ -513,147 +595,203 @@ def install_bootloader() -> None: paths[config_file_path] = True - for dest_path, source_path in config('additionalFiles').items(): + for dest_path, source_path in config("additionalFiles").items(): dest_path = os.path.join(limine_install_dir, dest_path) copy_file(source_path, dest_path) - limine_binary = os.path.join(str(config('liminePath')), 'bin', 'limine') - cpu_family = config('hostArchitecture', 'family') - if config('efiSupport'): + limine_binary = os.path.join(str(config("liminePath")), "bin", "limine") + cpu_family = config("hostArchitecture", "family") + if config("efiSupport"): boot_file = "" - if cpu_family == 'x86': - if config('hostArchitecture', 'bits') == 32: - boot_file = 'BOOTIA32.EFI' - elif config('hostArchitecture', 'bits') == 64: - boot_file = 'BOOTX64.EFI' - 
elif cpu_family == 'arm': - if config('hostArchitecture', 'arch') == 'armv8-a' and config('hostArchitecture', 'bits') == 64: - boot_file = 'BOOTAA64.EFI' + if cpu_family == "x86": + if config("hostArchitecture", "bits") == 32: + boot_file = "BOOTIA32.EFI" + elif config("hostArchitecture", "bits") == 64: + boot_file = "BOOTX64.EFI" + elif cpu_family == "arm": + if ( + config("hostArchitecture", "arch") == "armv8-a" + and config("hostArchitecture", "bits") == 64 + ): + boot_file = "BOOTAA64.EFI" else: - raise Exception(f'Unsupported CPU arch: {config("hostArchitecture", "arch")}') + raise Exception( + f"Unsupported CPU arch: {config('hostArchitecture', 'arch')}" + ) else: - raise Exception(f'Unsupported CPU family: {cpu_family}') - - efi_path = os.path.join(str(config('liminePath')), 'share', 'limine', boot_file) - dest_path = os.path.join(str(config('efiMountPoint')), 'efi', 'boot' if config('efiRemovable') else 'limine', boot_file) + raise Exception(f"Unsupported CPU family: {cpu_family}") + + efi_path = os.path.join(str(config("liminePath")), "share", "limine", boot_file) + dest_path = os.path.join( + str(config("efiMountPoint")), + "efi", + "boot" if config("efiRemovable") else "limine", + boot_file, + ) copy_file(efi_path, dest_path) - if config('enrollConfig'): + if config("enrollConfig"): b2sum = hashlib.blake2b() b2sum.update(config_file.strip().encode()) try: - subprocess.run([limine_binary, 'enroll-config', dest_path, b2sum.hexdigest()]) + subprocess.run( + [limine_binary, "enroll-config", dest_path, b2sum.hexdigest()] + ) except: - print('error: failed to enroll limine config.', file=sys.stderr) + print("error: failed to enroll limine config.", file=sys.stderr) sys.exit(1) - if config('secureBoot', 'enable'): - sbctl = os.path.join(str(config('secureBoot', 'sbctl')), 'bin', 'sbctl') - if config('secureBoot', 'createAndEnrollKeys'): + if config("secureBoot", "enable"): + sbctl = os.path.join(str(config("secureBoot", "sbctl")), "bin", "sbctl") + if config("secureBoot", "createAndEnrollKeys"): print("TEST MODE: creating and enrolling keys") try: - subprocess.run([sbctl, 'create-keys']) + subprocess.run([sbctl, "create-keys"]) except: - print('error: failed to create keys', file=sys.stderr) + print("error: failed to create keys", file=sys.stderr) sys.exit(1) try: - subprocess.run([sbctl, 'enroll-keys', '--yes-this-might-brick-my-machine']) + subprocess.run( + [sbctl, "enroll-keys", "--yes-this-might-brick-my-machine"] + ) except: - print('error: failed to enroll keys', file=sys.stderr) + print("error: failed to enroll keys", file=sys.stderr) sys.exit(1) - print('signing limine...') + print("signing limine...") try: - subprocess.run([sbctl, 'sign', dest_path]) + subprocess.run([sbctl, "sign", dest_path]) except: - print('error: failed to sign limine', file=sys.stderr) + print("error: failed to sign limine", file=sys.stderr) sys.exit(1) - if not config('efiRemovable') and not config('canTouchEfiVariables'): - print('warning: boot.loader.efi.canTouchEfiVariables is set to false while boot.loader.limine.efiInstallAsRemovable.\n This may render the system unbootable.') + if not config("efiRemovable") and not config("canTouchEfiVariables"): + print( + "warning: boot.loader.efi.canTouchEfiVariables is set to false while boot.loader.limine.efiInstallAsRemovable.\n This may render the system unbootable." 
+ ) - if config('canTouchEfiVariables'): - if config('efiRemovable'): - print('note: boot.loader.limine.efiInstallAsRemovable is true, no need to add EFI entry.') + if config("canTouchEfiVariables"): + if config("efiRemovable"): + print( + "note: boot.loader.limine.efiInstallAsRemovable is true, no need to add EFI entry." + ) else: - efibootmgr = os.path.join(str(config('efiBootMgrPath')), 'bin', 'efibootmgr') - efi_partition = find_mounted_device(str(config('efiMountPoint'))) + efibootmgr = os.path.join( + str(config("efiBootMgrPath")), "bin", "efibootmgr" + ) + efi_partition = find_mounted_device(str(config("efiMountPoint"))) efi_disk = find_disk_device(efi_partition) - efibootmgr_output = subprocess.check_output([efibootmgr], stderr=subprocess.STDOUT, universal_newlines=True) + efibootmgr_output = subprocess.check_output( + [efibootmgr], stderr=subprocess.STDOUT, universal_newlines=True + ) # Check the output of `efibootmgr` to find if limine is already installed and present in the boot record limine_boot_entry = None - if matches := re.findall(r'Boot([0-9a-fA-F]{4})\*? Limine', efibootmgr_output): + if matches := re.findall( + r"Boot([0-9a-fA-F]{4})\*? Limine", efibootmgr_output + ): limine_boot_entry = matches[0] # If there's already a Limine entry, replace it if limine_boot_entry: - boot_order = re.findall(r'BootOrder: ((?:[0-9a-fA-F]{4},?)*)', efibootmgr_output)[0] - - efibootmgr_output = subprocess.check_output([ - efibootmgr, - '-b', limine_boot_entry, - '-B', - ], stderr=subprocess.STDOUT, universal_newlines=True) - - efibootmgr_output = subprocess.check_output([ - efibootmgr, - '-c', - '-b', limine_boot_entry, - '-d', efi_disk, - '-p', efi_partition.removeprefix(efi_disk).removeprefix('p'), - '-l', f'\\efi\\limine\\{boot_file}', - '-L', 'Limine', - '-o', boot_order, - ], stderr=subprocess.STDOUT, universal_newlines=True) + boot_order = re.findall( + r"BootOrder: ((?:[0-9a-fA-F]{4},?)*)", efibootmgr_output + )[0] + + efibootmgr_output = subprocess.check_output( + [ + efibootmgr, + "-b", + limine_boot_entry, + "-B", + ], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + + efibootmgr_output = subprocess.check_output( + [ + efibootmgr, + "-c", + "-b", + limine_boot_entry, + "-d", + efi_disk, + "-p", + efi_partition.removeprefix(efi_disk).removeprefix("p"), + "-l", + f"\\efi\\limine\\{boot_file}", + "-L", + "Limine", + "-o", + boot_order, + ], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) else: - efibootmgr_output = subprocess.check_output([ - efibootmgr, - '-c', - '-d', efi_disk, - '-p', efi_partition.removeprefix(efi_disk).removeprefix('p'), - '-l', f'\\efi\\limine\\{boot_file}', - '-L', 'Limine', - ], stderr=subprocess.STDOUT, universal_newlines=True) - - if config('biosSupport'): - if cpu_family != 'x86': - raise Exception(f'Unsupported CPU family for BIOS install: {cpu_family}') - - limine_sys = os.path.join(str(config('liminePath')), 'share', 'limine', 'limine-bios.sys') - limine_sys_dest = os.path.join(limine_install_dir, 'limine-bios.sys') + efibootmgr_output = subprocess.check_output( + [ + efibootmgr, + "-c", + "-d", + efi_disk, + "-p", + efi_partition.removeprefix(efi_disk).removeprefix("p"), + "-l", + f"\\efi\\limine\\{boot_file}", + "-L", + "Limine", + ], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + + if config("biosSupport"): + if cpu_family != "x86": + raise Exception(f"Unsupported CPU family for BIOS install: {cpu_family}") + + limine_sys = os.path.join( + str(config("liminePath")), "share", "limine", "limine-bios.sys" + ) + 
limine_sys_dest = os.path.join(limine_install_dir, "limine-bios.sys") copy_file(limine_sys, limine_sys_dest) - device = str(config('biosDevice')) + device = str(config("biosDevice")) - if device == 'nodev': - print("note: boot.loader.limine.biosSupport is set, but device is set to nodev, only the stage 2 bootloader will be installed.", file=sys.stderr) + if device == "nodev": + print( + "note: boot.loader.limine.biosSupport is set, but device is set to nodev, only the stage 2 bootloader will be installed.", + file=sys.stderr, + ) return - limine_deploy_args: List[str] = [limine_binary, 'bios-install', device] + limine_deploy_args: List[str] = [limine_binary, "bios-install", device] - if config('partitionIndex'): - limine_deploy_args.append(str(config('partitionIndex'))) + if config("partitionIndex"): + limine_deploy_args.append(str(config("partitionIndex"))) - if config('force'): - limine_deploy_args.append('--force') + if config("force"): + limine_deploy_args.append("--force") try: subprocess.run(limine_deploy_args) except: raise Exception( - 'Failed to deploy BIOS stage 1 Limine bootloader!\n' + - 'You might want to try enabling the `boot.loader.limine.force` option.') + "Failed to deploy BIOS stage 1 Limine bootloader!\n" + + "You might want to try enabling the `boot.loader.limine.force` option." + ) print("removing unused boot files...") for path in paths: if not paths[path] and os.path.exists(path): os.remove(path) + def main() -> None: try: install_bootloader() @@ -664,7 +802,11 @@ def main() -> None: # event sync the efi filesystem after each update. rc = libc.syncfs(os.open(f"{str(config('efiMountPoint'))}", os.O_RDONLY)) if rc != 0: - print(f"could not sync {str(config('efiMountPoint'))}: {os.strerror(rc)}", file=sys.stderr) + print( + f"could not sync {str(config('efiMountPoint'))}: {os.strerror(rc)}", + file=sys.stderr, + ) + -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/nixos/modules/system/boot/loader/refind/refind-install.py b/nixos/modules/system/boot/loader/refind/refind-install.py index f546d3d7b5680..e524e15a613c7 100644 --- a/nixos/modules/system/boot/loader/refind/refind-install.py +++ b/nixos/modules/system/boot/loader/refind/refind-install.py @@ -16,7 +16,7 @@ refind_dir = None libc = CDLL("libc.so.6") -install_config = json.load(open('@configPath@', 'r')) +install_config = json.load(open("@configPath@", "r")) def config(*path: str) -> Optional[Any]: @@ -26,59 +26,69 @@ def config(*path: str) -> Optional[Any]: return result -def get_system_path(profile: str = 'system', gen: Optional[str] = None, spec: Optional[str] = None) -> str: - basename = f'{profile}-{gen}-link' if gen is not None else profile - profiles_dir = '/nix/var/nix/profiles' - if profile == 'system': +def get_system_path( + profile: str = "system", gen: Optional[str] = None, spec: Optional[str] = None +) -> str: + basename = f"{profile}-{gen}-link" if gen is not None else profile + profiles_dir = "/nix/var/nix/profiles" + if profile == "system": result = os.path.join(profiles_dir, basename) else: - result = os.path.join(profiles_dir, 'system-profiles', basename) + result = os.path.join(profiles_dir, "system-profiles", basename) if spec is not None: - result = os.path.join(result, 'specialisation', spec) + result = os.path.join(result, "specialisation", spec) return result def get_profiles() -> List[str]: - profiles_dir = '/nix/var/nix/profiles/system-profiles/' + profiles_dir = "/nix/var/nix/profiles/system-profiles/" dirs = os.listdir(profiles_dir) if 
os.path.isdir(profiles_dir) else [] - return [path for path in dirs if not path.endswith('-link')] - - -def get_gens(profile: str = 'system') -> List[Tuple[int, List[str]]]: - nix_env = os.path.join(config('nixPath'), 'bin', 'nix-env') - output = subprocess.check_output([ - nix_env, '--list-generations', - '-p', get_system_path(profile), - '--option', 'build-users-group', '', - ], universal_newlines=True) + return [path for path in dirs if not path.endswith("-link")] + + +def get_gens(profile: str = "system") -> List[Tuple[int, List[str]]]: + nix_env = os.path.join(config("nixPath"), "bin", "nix-env") + output = subprocess.check_output( + [ + nix_env, + "--list-generations", + "-p", + get_system_path(profile), + "--option", + "build-users-group", + "", + ], + universal_newlines=True, + ) gen_lines = output.splitlines() gen_nums = [int(line.split()[0]) for line in gen_lines] - return [gen for gen in gen_nums][-config('maxGenerations'):] + return [gen for gen in gen_nums][-config("maxGenerations") :] def is_encrypted(device: str) -> bool: - for name, _ in config('luksDevices'): - if os.readlink(os.path.join('/dev/mapper', name)) == os.readlink(device): + for name, _ in config("luksDevices"): + if os.readlink(os.path.join("/dev/mapper", name)) == os.readlink(device): return True return False def is_fs_type_supported(fs_type: str) -> bool: - return fs_type.startswith('vfat') + return fs_type.startswith("vfat") paths = {} + def get_copied_path_uri(path: str, target: str) -> str: package_id = os.path.basename(os.path.dirname(path)) suffix = os.path.basename(path) - dest_file = f'{package_id}-{suffix}' + dest_file = f"{package_id}-{suffix}" dest_path = os.path.join(refind_dir, target, dest_file) if not os.path.exists(dest_path): @@ -86,13 +96,16 @@ def get_copied_path_uri(path: str, target: str) -> str: else: paths[dest_path] = True - return os.path.join('/efi/refind', target, dest_file) + return os.path.join("/efi/refind", target, dest_file) + def get_path_uri(path: str) -> str: return get_copied_path_uri(path, "") -def get_file_uri(profile: str, gen: Optional[str], spec: Optional[str], name: str) -> str: +def get_file_uri( + profile: str, gen: Optional[str], spec: Optional[str], name: str +) -> str: gen_path = get_system_path(profile, gen, spec) path_in_store = os.path.realpath(os.path.join(gen_path, name)) return get_path_uri(path_in_store) @@ -116,10 +129,10 @@ class BootSpec: def bootjson_to_bootspec(bootjson: dict) -> BootSpec: - specialisations = bootjson['org.nixos.specialisation.v1'] + specialisations = bootjson["org.nixos.specialisation.v1"] specialisations = {k: bootjson_to_bootspec(v) for k, v in specialisations.items()} return BootSpec( - **bootjson['org.nixos.bootspec.v1'], + **bootjson["org.nixos.bootspec.v1"], specialisations=specialisations, ) @@ -127,22 +140,32 @@ def bootjson_to_bootspec(bootjson: dict) -> BootSpec: def config_entry(is_sub: bool, bootspec: BootSpec, label: str, time: str) -> str: entry = "" if is_sub: - entry += 'sub' + entry += "sub" entry += f'menuentry "{label}" {{\n' - entry += ' loader ' + get_kernel_uri(bootspec.kernel) + '\n' + entry += " loader " + get_kernel_uri(bootspec.kernel) + "\n" if bootspec.initrd: - entry += ' initrd ' + get_kernel_uri(bootspec.initrd) + '\n' + entry += " initrd " + get_kernel_uri(bootspec.initrd) + "\n" - entry += ' options "' + ' '.join(['init=' + bootspec.init] + bootspec.kernelParams).strip() + '"\n' - entry += '}\n' + entry += ( + ' options "' + + " ".join(["init=" + bootspec.init] + bootspec.kernelParams).strip() + + 
'"\n' + ) + entry += "}\n" return entry -def generate_config_entry(profile: str, gen: str, special: bool, group_name: str) -> str: - time = datetime.datetime.fromtimestamp(os.stat(get_system_path(profile,gen), follow_symlinks=False).st_mtime).strftime("%F %H:%M:%S") - boot_json = json.load(open(os.path.join(get_system_path(profile, gen), 'boot.json'), 'r')) +def generate_config_entry( + profile: str, gen: str, special: bool, group_name: str +) -> str: + time = datetime.datetime.fromtimestamp( + os.stat(get_system_path(profile, gen), follow_symlinks=False).st_mtime + ).strftime("%F %H:%M:%S") + boot_json = json.load( + open(os.path.join(get_system_path(profile, gen), "boot.json"), "r") + ) boot_spec = bootjson_to_bootspec(boot_json) specialisation_list = boot_spec.specialisations.items() @@ -150,24 +173,26 @@ def generate_config_entry(profile: str, gen: str, special: bool, group_name: str if len(specialisation_list) > 0: entry += f'menuentry "NixOS {group_name} Generation {gen}" {{\n' - entry += config_entry(True, boot_spec, f'Default', str(time)) + entry += config_entry(True, boot_spec, f"Default", str(time)) for spec, spec_boot_spec in specialisation_list: - entry += config_entry(True, spec_boot_spec, f'{spec}', str(time)) + entry += config_entry(True, spec_boot_spec, f"{spec}", str(time)) - entry += '}\n' + entry += "}\n" else: - entry += config_entry(False, boot_spec, f'NixOS {group_name} Generation {gen}', str(time)) + entry += config_entry( + False, boot_spec, f"NixOS {group_name} Generation {gen}", str(time) + ) return entry def find_disk_device(part: str) -> str: part = os.path.realpath(part) - part = part.removeprefix('/dev/') - disk = os.path.realpath(os.path.join('/sys', 'class', 'block', part)) + part = part.removeprefix("/dev/") + disk = os.path.realpath(os.path.join("/sys", "class", "block", part)) disk = os.path.dirname(disk) - return os.path.join('/dev', os.path.basename(disk)) + return os.path.join("/dev", os.path.basename(disk)) def find_mounted_device(path: str) -> str: @@ -197,7 +222,7 @@ def copy_file(from_path: str, to_path: str): def install_bootloader() -> None: global refind_dir - refind_dir = os.path.join(str(config('efiMountPoint')), 'efi', 'refind') + refind_dir = os.path.join(str(config("efiMountPoint")), "efi", "refind") if not os.path.exists(refind_dir): os.makedirs(refind_dir) @@ -206,39 +231,43 @@ def install_bootloader() -> None: for file in files: paths[os.path.join(dir, file)] = False - profiles = [('system', get_gens())] + profiles = [("system", get_gens())] for profile in get_profiles(): profiles += [(profile, get_gens(profile))] - timeout = config('timeout') + timeout = config("timeout") last_gen = get_gens()[-1] - last_gen_json = json.load(open(os.path.join(get_system_path('system', last_gen), 'boot.json'), 'r')) + last_gen_json = json.load( + open(os.path.join(get_system_path("system", last_gen), "boot.json"), "r") + ) last_gen_boot_spec = bootjson_to_bootspec(last_gen_json) - config_file = str(config('extraConfig')) + '\n' - config_file += textwrap.dedent(f''' + config_file = str(config("extraConfig")) + "\n" + config_file += textwrap.dedent(f""" timeout {timeout} default_selection {3 if len(last_gen_boot_spec.specialisations.items()) > 0 else 2} - ''') + """) - config_file += textwrap.dedent(''' + config_file += textwrap.dedent(""" # NixOS boot entries start here - ''') + """) - for (profile, gens) in profiles: - group_name = 'default profile' if profile == 'system' else f"profile '{profile}'" + for profile, gens in profiles: + group_name = ( + 
"default profile" if profile == "system" else f"profile '{profile}'" + ) isFirst = True for gen in sorted(gens, key=lambda x: x, reverse=True): config_file += generate_config_entry(profile, gen, isFirst, group_name) isFirst = False - config_file_path = os.path.join(refind_dir, 'refind.conf') - config_file += '\n# NixOS boot entries end here\n\n' + config_file_path = os.path.join(refind_dir, "refind.conf") + config_file += "\n# NixOS boot entries end here\n\n" - with open(f"{config_file_path}.tmp", 'w') as file: + with open(f"{config_file_path}.tmp", "w") as file: file.truncate() file.write(config_file.strip()) file.flush() @@ -247,80 +276,125 @@ def install_bootloader() -> None: paths[config_file_path] = True - for dest_path, source_path in config('additionalFiles').items(): + for dest_path, source_path in config("additionalFiles").items(): dest_path = os.path.join(refind_dir, dest_path) copy_file(source_path, dest_path) - cpu_family = config('hostArchitecture', 'family') - if cpu_family == 'x86': - if config('hostArchitecture', 'bits') == 32: - boot_file = 'BOOTIA32.EFI' - efi_file = 'refind_ia32.efi' - elif config('hostArchitecture', 'bits') == 64: - boot_file = 'BOOTX64.EFI' - efi_file = 'refind_x64.efi' - elif cpu_family == 'arm': - if config('hostArchitecture', 'arch') == 'armv8-a' and config('hostArchitecture', 'bits') == 64: - boot_file = 'BOOTAA64.EFI' - efi_file = 'refind_aa64.efi' + cpu_family = config("hostArchitecture", "family") + if cpu_family == "x86": + if config("hostArchitecture", "bits") == 32: + boot_file = "BOOTIA32.EFI" + efi_file = "refind_ia32.efi" + elif config("hostArchitecture", "bits") == 64: + boot_file = "BOOTX64.EFI" + efi_file = "refind_x64.efi" + elif cpu_family == "arm": + if ( + config("hostArchitecture", "arch") == "armv8-a" + and config("hostArchitecture", "bits") == 64 + ): + boot_file = "BOOTAA64.EFI" + efi_file = "refind_aa64.efi" else: - raise Exception(f'Unsupported CPU arch: {config("hostArchitecture", "arch")}') + raise Exception( + f"Unsupported CPU arch: {config('hostArchitecture', 'arch')}" + ) else: - raise Exception(f'Unsupported CPU family: {cpu_family}') - - efi_path = os.path.join(config('refindPath'), 'share', 'refind', efi_file) - dest_path = os.path.join(config('efiMountPoint'), 'efi', 'boot' if config('efiRemovable') else 'refind', boot_file) + raise Exception(f"Unsupported CPU family: {cpu_family}") + + efi_path = os.path.join(config("refindPath"), "share", "refind", efi_file) + dest_path = os.path.join( + config("efiMountPoint"), + "efi", + "boot" if config("efiRemovable") else "refind", + boot_file, + ) copy_file(efi_path, dest_path) - if not config('efiRemovable') and not config('canTouchEfiVariables'): - print('warning: boot.loader.efi.canTouchEfiVariables is set to false while boot.loader.limine.efiInstallAsRemovable.\n This may render the system unbootable.') + if not config("efiRemovable") and not config("canTouchEfiVariables"): + print( + "warning: boot.loader.efi.canTouchEfiVariables is set to false while boot.loader.limine.efiInstallAsRemovable.\n This may render the system unbootable." + ) - if config('canTouchEfiVariables'): - if config('efiRemovable'): - print('note: boot.loader.limine.efiInstallAsRemovable is true, no need to add EFI entry.') + if config("canTouchEfiVariables"): + if config("efiRemovable"): + print( + "note: boot.loader.limine.efiInstallAsRemovable is true, no need to add EFI entry." 
+ ) else: - efibootmgr = os.path.join(str(config('efiBootMgrPath')), 'bin', 'efibootmgr') - efi_partition = find_mounted_device(str(config('efiMountPoint'))) + efibootmgr = os.path.join( + str(config("efiBootMgrPath")), "bin", "efibootmgr" + ) + efi_partition = find_mounted_device(str(config("efiMountPoint"))) efi_disk = find_disk_device(efi_partition) - efibootmgr_output = subprocess.check_output([efibootmgr], stderr=subprocess.STDOUT, universal_newlines=True) + efibootmgr_output = subprocess.check_output( + [efibootmgr], stderr=subprocess.STDOUT, universal_newlines=True + ) # Check the output of `efibootmgr` to find if rEFInd is already installed and present in the boot record refind_boot_entry = None - if matches := re.findall(r'Boot([0-9a-fA-F]{4})\*? rEFInd', efibootmgr_output): + if matches := re.findall( + r"Boot([0-9a-fA-F]{4})\*? rEFInd", efibootmgr_output + ): refind_boot_entry = matches[0] # If there's already a Limine entry, replace it if refind_boot_entry: - boot_order = re.findall(r'BootOrder: ((?:[0-9a-fA-F]{4},?)*)', efibootmgr_output)[0] - - efibootmgr_output = subprocess.check_output([ - efibootmgr, - '-b', refind_boot_entry, - '-B', - ], stderr=subprocess.STDOUT, universal_newlines=True) - - efibootmgr_output = subprocess.check_output([ - efibootmgr, - '-c', - '-b', refind_boot_entry, - '-d', efi_disk, - '-p', efi_partition.removeprefix(efi_disk).removeprefix('p'), - '-l', f'\\efi\\refind\\{boot_file}', - '-L', 'rEFInd', - '-o', boot_order, - ], stderr=subprocess.STDOUT, universal_newlines=True) + boot_order = re.findall( + r"BootOrder: ((?:[0-9a-fA-F]{4},?)*)", efibootmgr_output + )[0] + + efibootmgr_output = subprocess.check_output( + [ + efibootmgr, + "-b", + refind_boot_entry, + "-B", + ], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + + efibootmgr_output = subprocess.check_output( + [ + efibootmgr, + "-c", + "-b", + refind_boot_entry, + "-d", + efi_disk, + "-p", + efi_partition.removeprefix(efi_disk).removeprefix("p"), + "-l", + f"\\efi\\refind\\{boot_file}", + "-L", + "rEFInd", + "-o", + boot_order, + ], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) else: - efibootmgr_output = subprocess.check_output([ - efibootmgr, - '-c', - '-d', efi_disk, - '-p', efi_partition.removeprefix(efi_disk).removeprefix('p'), - '-l', f'\\efi\\refind\\{boot_file}', - '-L', 'rEFInd', - ], stderr=subprocess.STDOUT, universal_newlines=True) + efibootmgr_output = subprocess.check_output( + [ + efibootmgr, + "-c", + "-d", + efi_disk, + "-p", + efi_partition.removeprefix(efi_disk).removeprefix("p"), + "-l", + f"\\efi\\refind\\{boot_file}", + "-L", + "rEFInd", + ], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) print("removing unused boot files...") for path in paths: @@ -338,7 +412,11 @@ def main() -> None: # event sync the efi filesystem after each update. 
rc = libc.syncfs(os.open(f"{config('efiMountPoint')}", os.O_RDONLY)) if rc != 0: - print(f"could not sync {config('efiMountPoint')}: {os.strerror(rc)}", file=sys.stderr) + print( + f"could not sync {config('efiMountPoint')}: {os.strerror(rc)}", + file=sys.stderr, + ) + -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py index 90241b92cd5f5..2ff06a0ed0343 100644 --- a/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py +++ b/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py @@ -19,9 +19,11 @@ EFI_SYS_MOUNT_POINT = Path("@efiSysMountPoint@") BOOT_MOUNT_POINT = Path("@bootMountPoint@") LOADER_CONF = EFI_SYS_MOUNT_POINT / "loader/loader.conf" # Always stored on the ESP -NIXOS_DIR = Path("@nixosDir@".strip("/")) # Path relative to the XBOOTLDR or ESP mount point +NIXOS_DIR = Path( + "@nixosDir@".strip("/") +) # Path relative to the XBOOTLDR or ESP mount point TIMEOUT = "@timeout@" -EDITOR = "@editor@" == "1" # noqa: PLR0133 +EDITOR = "@editor@" == "1" # noqa: PLR0133 CONSOLE_MODE = "@consoleMode@" BOOTSPEC_TOOLS = "@bootspecTools@" DISTRO_NAME = "@distroName@" @@ -35,6 +37,7 @@ CHECK_MOUNTPOINTS = "@checkMountpoints@" STORE_DIR = "@storeDir@" + @dataclass class BootSpec: init: Path @@ -54,9 +57,13 @@ class BootSpec: FILE = None | int -def run(cmd: Sequence[str | Path], stdout: FILE = None) -> subprocess.CompletedProcess[str]: + +def run( + cmd: Sequence[str | Path], stdout: FILE = None +) -> subprocess.CompletedProcess[str]: return subprocess.run(cmd, check=True, text=True, stdout=stdout) + class SystemIdentifier(NamedTuple): profile: str | None generation: int @@ -65,7 +72,9 @@ class SystemIdentifier(NamedTuple): def copy_if_not_exists(source: Path, dest: Path) -> None: if not dest.exists(): - tmpfd, tmppath = tempfile.mkstemp(dir=dest.parent, prefix=dest.name, suffix='.tmp.') + tmpfd, tmppath = tempfile.mkstemp( + dir=dest.parent, prefix=dest.name, suffix=".tmp." 
+ ) shutil.copyfile(source, tmppath) os.fsync(tmpfd) shutil.move(tmppath, dest) @@ -73,17 +82,23 @@ def copy_if_not_exists(source: Path, dest: Path) -> None: def generation_dir(profile: str | None, generation: int) -> Path: if profile: - return Path(f"/nix/var/nix/profiles/system-profiles/{profile}-{generation}-link") + return Path( + f"/nix/var/nix/profiles/system-profiles/{profile}-{generation}-link" + ) else: return Path(f"/nix/var/nix/profiles/system-{generation}-link") -def system_dir(profile: str | None, generation: int, specialisation: str | None) -> Path: + +def system_dir( + profile: str | None, generation: int, specialisation: str | None +) -> Path: d = generation_dir(profile, generation) if specialisation: return d / "specialisation" / specialisation else: return d + BOOT_ENTRY = """title {title} sort-key {sort_key} version Generation {generation} {description} @@ -92,7 +107,10 @@ def system_dir(profile: str | None, generation: int, specialisation: str | None) options {kernel_params} """ -def generation_conf_filename(profile: str | None, generation: int, specialisation: str | None) -> str: + +def generation_conf_filename( + profile: str | None, generation: int, specialisation: str | None +) -> str: pieces = [ "nixos", profile or None, @@ -103,11 +121,16 @@ def generation_conf_filename(profile: str | None, generation: int, specialisatio return "-".join(p for p in pieces if p) + ".conf" -def write_loader_conf(profile: str | None, generation: int, specialisation: str | None) -> None: +def write_loader_conf( + profile: str | None, generation: int, specialisation: str | None +) -> None: tmp = LOADER_CONF.with_suffix(".tmp") - with tmp.open('x') as f: + with tmp.open("x") as f: f.write(f"timeout {TIMEOUT}\n") - f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation)) + f.write( + "default %s\n" + % generation_conf_filename(profile, generation, specialisation) + ) if not EDITOR: f.write("editor 0\n") if REBOOT_FOR_BITLOCKER: @@ -127,7 +150,9 @@ def get_bootspec(profile: str | None, generation: int) -> BootSpec: try: bootspec_json = json.load(f) except ValueError as e: - print(f"error: Malformed Json: {e}, in {boot_json_path}", file=sys.stderr) + print( + f"error: Malformed Json: {e}, in {boot_json_path}", file=sys.stderr + ) sys.exit(1) else: boot_json_str = run( @@ -143,17 +168,18 @@ def get_bootspec(profile: str | None, generation: int) -> BootSpec: bootspec_json = json.loads(boot_json_str) return bootspec_from_json(bootspec_json) + def bootspec_from_json(bootspec_json: dict[str, Any]) -> BootSpec: - specialisations = bootspec_json['org.nixos.specialisation.v1'] + specialisations = bootspec_json["org.nixos.specialisation.v1"] specialisations = {k: bootspec_from_json(v) for k, v in specialisations.items()} - systemdBootExtension = bootspec_json.get('org.nixos.systemd-boot', {}) - sortKey = systemdBootExtension.get('sortKey', 'nixos') - devicetree = systemdBootExtension.get('devicetree') + systemdBootExtension = bootspec_json.get("org.nixos.systemd-boot", {}) + sortKey = systemdBootExtension.get("sortKey", "nixos") + devicetree = systemdBootExtension.get("devicetree") if devicetree: devicetree = Path(devicetree) - main_json = bootspec_json['org.nixos.bootspec.v1'] + main_json = bootspec_json["org.nixos.bootspec.v1"] for attr in ("kernel", "initrd", "toplevel"): if attr in main_json: main_json[attr] = Path(main_json[attr]) @@ -172,24 +198,35 @@ def copy_from_file(file: Path, dry_run: bool = False) -> Path: store_file_path = file.resolve() suffix = 
store_file_path.name store_subdir = store_file_path.relative_to(STORE_DIR).parts[0] - efi_file_path = NIXOS_DIR / (f"{suffix}.efi" if suffix == store_subdir else f"{store_subdir}-{suffix}.efi") + efi_file_path = NIXOS_DIR / ( + f"{suffix}.efi" if suffix == store_subdir else f"{store_subdir}-{suffix}.efi" + ) if not dry_run: copy_if_not_exists(store_file_path, BOOT_MOUNT_POINT / efi_file_path) return efi_file_path -def write_entry(profile: str | None, generation: int, specialisation: str | None, - machine_id: str | None, bootspec: BootSpec, current: bool) -> None: +def write_entry( + profile: str | None, + generation: int, + specialisation: str | None, + machine_id: str | None, + bootspec: BootSpec, + current: bool, +) -> None: if specialisation: bootspec = bootspec.specialisations[specialisation] kernel = copy_from_file(bootspec.kernel) initrd = copy_from_file(bootspec.initrd) - devicetree = copy_from_file(bootspec.devicetree) if bootspec.devicetree is not None else None + devicetree = ( + copy_from_file(bootspec.devicetree) if bootspec.devicetree is not None else None + ) title = "{name}{profile}{specialisation}".format( name=DISTRO_NAME, profile=" [" + profile + "]" if profile else "", - specialisation=" (%s)" % specialisation if specialisation else "") + specialisation=" (%s)" % specialisation if specialisation else "", + ) try: if bootspec.initrdSecrets is not None: @@ -199,26 +236,40 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None print("failed to create initrd secrets!", file=sys.stderr) sys.exit(1) else: - print("warning: failed to create initrd secrets " - f'for "{title} - Configuration {generation}", an older generation', file=sys.stderr) - print("note: this is normal after having removed " - "or renamed a file in `boot.initrd.secrets`", file=sys.stderr) - entry_file = BOOT_MOUNT_POINT / "loader/entries" / generation_conf_filename(profile, generation, specialisation) + print( + "warning: failed to create initrd secrets " + f'for "{title} - Configuration {generation}", an older generation', + file=sys.stderr, + ) + print( + "note: this is normal after having removed " + "or renamed a file in `boot.initrd.secrets`", + file=sys.stderr, + ) + entry_file = ( + BOOT_MOUNT_POINT + / "loader/entries" + / generation_conf_filename(profile, generation, specialisation) + ) tmp_path = entry_file.with_suffix(".tmp") kernel_params = "init=%s " % bootspec.init kernel_params = kernel_params + " ".join(bootspec.kernelParams) build_time = int(system_dir(profile, generation, specialisation).stat().st_ctime) - build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F') + build_date = datetime.datetime.fromtimestamp(build_time).strftime("%F") with tmp_path.open("w") as f: - f.write(BOOT_ENTRY.format(title=title, - sort_key=bootspec.sortKey, - generation=generation, - kernel=f"/{kernel}", - initrd=f"/{initrd}", - kernel_params=kernel_params, - description=f"{bootspec.label}, built on {build_date}")) + f.write( + BOOT_ENTRY.format( + title=title, + sort_key=bootspec.sortKey, + generation=generation, + kernel=f"/{kernel}", + initrd=f"/{initrd}", + kernel_params=kernel_params, + description=f"{bootspec.label}, built on {build_date}", + ) + ) if machine_id is not None: f.write("machine-id %s\n" % machine_id) if devicetree is not None: @@ -245,9 +296,7 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]: configurationLimit = CONFIGURATION_LIMIT configurations = [ SystemIdentifier( - profile=profile, - generation=int(line.split()[0]), - 
specialisation=None + profile=profile, generation=int(line.split()[0]), specialisation=None ) for line in gen_lines ] @@ -256,7 +305,9 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]: def remove_old_entries(gens: list[SystemIdentifier]) -> None: rex_profile = re.compile(r"^nixos-(.*)-generation-.*\.conf$") - rex_generation = re.compile(r"^nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$") + rex_generation = re.compile( + r"^nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$" + ) known_paths = [] for gen in gens: bootspec = get_bootspec(gen.profile, gen.generation) @@ -264,7 +315,9 @@ def remove_old_entries(gens: list[SystemIdentifier]) -> None: known_paths.append(copy_from_file(bootspec.initrd, True).name) if bootspec.devicetree is not None: known_paths.append(copy_from_file(bootspec.devicetree, True).name) - for path in (BOOT_MOUNT_POINT / "loader/entries").glob("nixos*-generation-[1-9]*.conf", case_sensitive=False): + for path in (BOOT_MOUNT_POINT / "loader/entries").glob( + "nixos*-generation-[1-9]*.conf", case_sensitive=False + ): if rex_profile.match(path.name): prof = rex_profile.sub(r"\1", path.name) else: @@ -291,12 +344,13 @@ def cleanup_esp() -> None: def get_profiles() -> list[str]: system_profiles = Path("/nix/var/nix/profiles/system-profiles/") if system_profiles.is_dir(): - return [x.name - for x in system_profiles.iterdir() - if not x.name.endswith("-link")] + return [ + x.name for x in system_profiles.iterdir() if not x.name.endswith("-link") + ] else: return [] + def install_bootloader(args: argparse.Namespace) -> None: try: with open("/etc/machine-id") as machine_file: @@ -307,7 +361,10 @@ def install_bootloader(args: argparse.Namespace) -> None: machine_id = None if os.getenv("NIXOS_INSTALL_GRUB") == "1": - warnings.warn("NIXOS_INSTALL_GRUB env var deprecated, use NIXOS_INSTALL_BOOTLOADER", DeprecationWarning) + warnings.warn( + "NIXOS_INSTALL_GRUB env var deprecated, use NIXOS_INSTALL_BOOTLOADER", + DeprecationWarning, + ) os.environ["NIXOS_INSTALL_BOOTLOADER"] = "1" # flags to pass to bootctl install/update @@ -351,13 +408,18 @@ def install_bootloader(args: argparse.Namespace) -> None: # ESP: /boot (/dev/disk/by-partuuid/9b39b4c4-c48b-4ebf-bfea-a56b2395b7e0) # File: ├─/EFI/systemd/HashTool.efi # └─/EFI/systemd/systemd-bootx64.efi (systemd-boot 255.2) - installed_match = re.search(r"^\W+.*/EFI/(?:BOOT|systemd)/.*\.efi \(systemd-boot ([\d.]+[^)]*)\)$", - installed_out, re.IGNORECASE | re.MULTILINE) + installed_match = re.search( + r"^\W+.*/EFI/(?:BOOT|systemd)/.*\.efi \(systemd-boot ([\d.]+[^)]*)\)$", + installed_out, + re.IGNORECASE | re.MULTILINE, + ) available_match = re.search(r"^\((.*)\)$", available_out) if installed_match is None: - raise Exception("Could not find any previously installed systemd-boot. If you are switching to systemd-boot from a different bootloader, you need to run `nixos-rebuild switch --install-bootloader`") + raise Exception( + "Could not find any previously installed systemd-boot. 
If you are switching to systemd-boot from a different bootloader, you need to run `nixos-rebuild switch --install-bootloader`" + ) if available_match is None: raise Exception("could not determine systemd-boot version") @@ -366,7 +428,11 @@ def install_bootloader(args: argparse.Namespace) -> None: available_version = available_match.group(1) if installed_version < available_version: - print("updating systemd-boot from %s to %s" % (installed_version, available_version), file=sys.stderr) + print( + "updating systemd-boot from %s to %s" + % (installed_version, available_version), + file=sys.stderr, + ) run( [f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}"] + bootctl_flags @@ -388,14 +454,28 @@ def install_bootloader(args: argparse.Namespace) -> None: is_default = Path(bootspec.init).parent == Path(args.default_config) write_entry(*gen, machine_id, bootspec, current=is_default) for specialisation in bootspec.specialisations.keys(): - write_entry(gen.profile, gen.generation, specialisation, machine_id, bootspec, current=is_default) + write_entry( + gen.profile, + gen.generation, + specialisation, + machine_id, + bootspec, + current=is_default, + ) if is_default: write_loader_conf(*gen) except OSError as e: # See https://github.com/NixOS/nixpkgs/issues/114552 if e.errno == errno.EINVAL: - profile = f"profile '{gen.profile}'" if gen.profile else "default profile" - print("ignoring {} in the list of boot entries because of the following error:\n{}".format(profile, e), file=sys.stderr) + profile = ( + f"profile '{gen.profile}'" if gen.profile else "default profile" + ) + print( + "ignoring {} in the list of boot entries because of the following error:\n{}".format( + profile, e + ), + file=sys.stderr, + ) else: raise e @@ -425,8 +505,14 @@ def install_bootloader(args: argparse.Namespace) -> None: def main() -> None: - parser = argparse.ArgumentParser(description=f"Update {DISTRO_NAME}-related systemd-boot files") - parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help=f"The default {DISTRO_NAME} config to boot") + parser = argparse.ArgumentParser( + description=f"Update {DISTRO_NAME}-related systemd-boot files" + ) + parser.add_argument( + "default_config", + metavar="DEFAULT-CONFIG", + help=f"The default {DISTRO_NAME} config to boot", + ) args = parser.parse_args() run([CHECK_MOUNTPOINTS]) @@ -440,13 +526,18 @@ def main() -> None: # event sync the efi filesystem after each update. 
rc = libc.syncfs(os.open(f"{BOOT_MOUNT_POINT}", os.O_RDONLY)) if rc != 0: - print(f"could not sync {BOOT_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr) + print( + f"could not sync {BOOT_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr + ) if BOOT_MOUNT_POINT != EFI_SYS_MOUNT_POINT: rc = libc.syncfs(os.open(EFI_SYS_MOUNT_POINT, os.O_RDONLY)) if rc != 0: - print(f"could not sync {EFI_SYS_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr) + print( + f"could not sync {EFI_SYS_MOUNT_POINT}: {os.strerror(rc)}", + file=sys.stderr, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/nixos/tests/acme/python-utils.py b/nixos/tests/acme/python-utils.py index d542324084aa1..4e1b649d5d6a3 100644 --- a/nixos/tests/acme/python-utils.py +++ b/nixos/tests/acme/python-utils.py @@ -3,6 +3,7 @@ TOTAL_RETRIES = 20 + # BackoffTracker provides a robust system for handling test retries class BackoffTracker: delay = 1 @@ -34,16 +35,19 @@ def wrapper(*args, retries: int = 0, **kwargs): backoff = BackoffTracker() + def run(node, cmd, fail=False): if fail: return node.fail(cmd) else: return node.succeed(cmd) + # Waits for the system to finish booting or switching configuration def wait_for_running(node): node.succeed("systemctl is-system-running --wait") + # On first switch, this will create a symlink to the current system so that we can # quickly switch between derivations def switch_to(node, name, fail=False) -> None: @@ -65,6 +69,7 @@ def switch_to(node, name, fail=False) -> None: if not fail: wait_for_running(node) + # Ensures the issuer of our cert matches the chain # and matches the issuer we expect it to be. # It's a good validation to ensure the cert.pem and fullchain.pem @@ -75,15 +80,17 @@ def check_issuer(node, cert_name, issuer) -> None: actual_issuer = node.succeed( f"openssl x509 -noout -issuer -in /var/lib/acme/{cert_name}/{fname}" ).partition("=")[2] - assert ( - issuer.lower() in actual_issuer.lower() - ), f"{fname} issuer mismatch. Expected {issuer} got {actual_issuer}" + assert issuer.lower() in actual_issuer.lower(), ( + f"{fname} issuer mismatch. 
Expected {issuer} got {actual_issuer}" + ) + # Ensures the provided domain matches with the given cert def check_domain(node, cert_name, domain, fail=False) -> None: cmd = f"openssl x509 -noout -checkhost '{domain}' -in /var/lib/acme/{cert_name}/cert.pem" run(node, cmd, fail=fail) + # Ensures the required values for OCSP stapling are present # Pebble doesn't provide a full OCSP responder, so just checks the URL def check_stapling(node, cert_name, ca_domain, fail=False): @@ -99,6 +106,7 @@ def check_stapling(node, cert_name, ca_domain, fail=False): fail=fail, ) + # Checks the keyType by validating the number of bits def check_key_bits(node, cert_name, bits, fail=False): run( @@ -108,6 +116,7 @@ def check_key_bits(node, cert_name, bits, fail=False): fail=fail, ) + # Ensure cert comes before chain in fullchain.pem def check_fullchain(node, cert_name): cert_file = f"/var/lib/acme/{cert_name}/fullchain.pem" @@ -115,8 +124,7 @@ def check_fullchain(node, cert_name): assert len(num_certs.strip().split("\n")) > 1, "Insufficient certs in fullchain.pem" first_cert_data = node.succeed( - f"grep -m1 -B50 'END CERTIFICATE' {cert_file}" - " | openssl x509 -noout -text" + f"grep -m1 -B50 'END CERTIFICATE' {cert_file} | openssl x509 -noout -text" ) for line in first_cert_data.lower().split("\n"): if "dns:" in line: @@ -126,6 +134,7 @@ def check_fullchain(node, cert_name): assert False + # Checks the permissions in the cert directories are as expected def check_permissions(node, cert_name, group): stat = "stat -L -c '%a %U %G' " @@ -159,7 +168,8 @@ def check_connection(node, domain, fail=False, minica=False): cafile = "/tmp/ca.crt" if minica: cafile = "/var/lib/acme/.minica/cert.pem" - run(node, + run( + node, f"openssl s_client -brief -CAfile {cafile}" f" -verify 2 -verify_return_error -verify_hostname {domain}" f" -servername {domain} -connect {domain}:443 < /dev/null", diff --git a/nixos/tests/google-oslogin/server.py b/nixos/tests/google-oslogin/server.py index 622cd86b26195..875510f6d1541 100755 --- a/nixos/tests/google-oslogin/server.py +++ b/nixos/tests/google-oslogin/server.py @@ -10,9 +10,9 @@ from urllib.parse import urlparse, parse_qs from typing import Dict -SNAKEOIL_PUBLIC_KEY = os.environ['SNAKEOIL_PUBLIC_KEY'] -MOCKUSER="mockuser_nixos_org" -MOCKADMIN="mockadmin_nixos_org" +SNAKEOIL_PUBLIC_KEY = os.environ["SNAKEOIL_PUBLIC_KEY"] +MOCKUSER = "mockuser_nixos_org" +MOCKADMIN = "mockadmin_nixos_org" def w(msg: bytes): @@ -30,7 +30,9 @@ def gen_email(username: str): return str(int(hashlib.sha256(username.encode()).hexdigest(), 16))[0:21] -def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoil_pubkey: str) -> Dict: +def gen_mockuser( + username: str, uid: str, gid: str, home_directory: str, snakeoil_pubkey: str +) -> Dict: snakeoil_pubkey_fingerprint = gen_fingerprint(snakeoil_pubkey) # seems to be a 21 characters long numberstring, so mimic that in a reproducible way email = gen_email(username) @@ -45,26 +47,27 @@ def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoi "uid": uid, "gid": gid, "homeDirectory": home_directory, - "operatingSystemType": "LINUX" + "operatingSystemType": "LINUX", } ], "sshPublicKeys": { snakeoil_pubkey_fingerprint: { "key": snakeoil_pubkey, - "expirationTimeUsec": str((time.time() + 600) * 1000000), # 10 minutes in the future - "fingerprint": snakeoil_pubkey_fingerprint + "expirationTimeUsec": str( + (time.time() + 600) * 1000000 + ), # 10 minutes in the future + "fingerprint": snakeoil_pubkey_fingerprint, } - } + }, } 
] } class ReqHandler(BaseHTTPRequestHandler): - def _send_json_ok(self, data: dict): self.send_response(200) - self.send_header('Content-type', 'application/json') + self.send_header("Content-type", "application/json") self.end_headers() out = json.dumps(data).encode() w(out) @@ -72,7 +75,7 @@ def _send_json_ok(self, data: dict): def _send_json_success(self, success=True): self.send_response(200) - self.send_header('Content-type', 'application/json') + self.send_header("Content-type", "application/json") self.end_headers() out = json.dumps({"success": success}).encode() w(out) @@ -90,27 +93,39 @@ def do_GET(self): # users endpoint if pu.path == "/computeMetadata/v1/oslogin/users": # mockuser and mockadmin are allowed to login, both use the same snakeoil public key - if params.get('username') == [MOCKUSER] or params.get('uid') == ["1009719690"]: + if params.get("username") == [MOCKUSER] or params.get("uid") == [ + "1009719690" + ]: username = MOCKUSER uid = "1009719690" - elif params.get('username') == [MOCKADMIN] or params.get('uid') == ["1009719691"]: + elif params.get("username") == [MOCKADMIN] or params.get("uid") == [ + "1009719691" + ]: username = MOCKADMIN uid = "1009719691" else: self._send_404() return - self._send_json_ok(gen_mockuser(username=username, uid=uid, gid=uid, home_directory=f"/home/{username}", snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY)) + self._send_json_ok( + gen_mockuser( + username=username, + uid=uid, + gid=uid, + home_directory=f"/home/{username}", + snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY, + ) + ) return # we need to provide something at the groups endpoint. # the nss module does segfault if we don't. elif pu.path == "/computeMetadata/v1/oslogin/groups": - self._send_json_ok({ - "posixGroups": [ - {"name" : "demo", "gid" : 4294967295} - ], - }) + self._send_json_ok( + { + "posixGroups": [{"name": "demo", "gid": 4294967295}], + } + ) return # authorize endpoint @@ -118,7 +133,9 @@ def do_GET(self): # is user allowed to login? if params.get("policy") == ["login"]: # mockuser and mockadmin are allowed to login - if params.get('email') == [gen_email(MOCKUSER)] or params.get('email') == [gen_email(MOCKADMIN)]: + if params.get("email") == [gen_email(MOCKUSER)] or params.get( + "email" + ) == [gen_email(MOCKADMIN)]: self._send_json_success() return self._send_json_success(False) @@ -126,7 +143,7 @@ def do_GET(self): # is user allowed to become root? 
elif params.get("policy") == ["adminLogin"]: # only mockadmin is allowed to become admin - self._send_json_success((params['email'] == [gen_email(MOCKADMIN)])) + self._send_json_success((params["email"] == [gen_email(MOCKADMIN)])) return # send 404 for other policies else: @@ -137,9 +154,9 @@ def do_GET(self): sys.stderr.flush() self.send_response(404) self.end_headers() - self.wfile.write(b'') + self.wfile.write(b"") -if __name__ == '__main__': - s = HTTPServer(('0.0.0.0', 80), ReqHandler) +if __name__ == "__main__": + s = HTTPServer(("0.0.0.0", 80), ReqHandler) s.serve_forever() diff --git a/nixos/tests/pam/test_chfn.py b/nixos/tests/pam/test_chfn.py index 900feb35f4b3c..fc1f4f454e5c8 100644 --- a/nixos/tests/pam/test_chfn.py +++ b/nixos/tests/pam/test_chfn.py @@ -25,4 +25,6 @@ assert not missing_lines, f"Missing lines: {missing_lines}" with subtest("All remaining lines are empty or comments"): - assert not unexpected_functional_lines, f"Unexpected lines: {unexpected_functional_lines}" + assert not unexpected_functional_lines, ( + f"Unexpected lines: {unexpected_functional_lines}" + ) diff --git a/nixos/tests/spark/spark_sample.py b/nixos/tests/spark/spark_sample.py index c4939451eae04..9eae10c1b8461 100644 --- a/nixos/tests/spark/spark_sample.py +++ b/nixos/tests/spark/spark_sample.py @@ -4,37 +4,35 @@ from pyspark.sql.types import * from pyspark.sql.functions import explode + def explode_col(weight): - return int(weight//10) * [10.0] + ([] if weight%10==0 else [weight%10]) + return int(weight // 10) * [10.0] + ([] if weight % 10 == 0 else [weight % 10]) + spark = SparkSession.builder.getOrCreate() dataSchema = [ StructField("feature_1", FloatType()), StructField("feature_2", FloatType()), - StructField("bias_weight", FloatType()) + StructField("bias_weight", FloatType()), ] -data = [ - Row(0.1, 0.2, 10.32), - Row(0.32, 1.43, 12.8), - Row(1.28, 1.12, 0.23) -] +data = [Row(0.1, 0.2, 10.32), Row(0.32, 1.43, 12.8), Row(1.28, 1.12, 0.23)] df = spark.createDataFrame(spark.sparkContext.parallelize(data), StructType(dataSchema)) normalizing_constant = 100 -sum_bias_weight = df.select(F.sum('bias_weight')).collect()[0][0] +sum_bias_weight = df.select(F.sum("bias_weight")).collect()[0][0] normalizing_factor = normalizing_constant / sum_bias_weight -df = df.withColumn('normalized_bias_weight', df.bias_weight * normalizing_factor) -df = df.drop('bias_weight') -df = df.withColumnRenamed('normalized_bias_weight', 'bias_weight') +df = df.withColumn("normalized_bias_weight", df.bias_weight * normalizing_factor) +df = df.drop("bias_weight") +df = df.withColumnRenamed("normalized_bias_weight", "bias_weight") my_udf = udf(lambda x: explode_col(x), ArrayType(FloatType())) -df1 = df.withColumn('explode_val', my_udf(df.bias_weight)) +df1 = df.withColumn("explode_val", my_udf(df.bias_weight)) df1 = df1.withColumn("explode_val_1", explode(df1.explode_val)).drop("explode_val") -df1 = df1.drop('bias_weight').withColumnRenamed('explode_val_1', 'bias_weight') +df1 = df1.drop("bias_weight").withColumnRenamed("explode_val_1", "bias_weight") df1.show() -assert(df1.count() == 12) +assert df1.count() == 12 diff --git a/nixos/tests/systemd-confinement/checkperms.py b/nixos/tests/systemd-confinement/checkperms.py index 3c7ba279a3d20..ac5dec2c8bf3a 100644 --- a/nixos/tests/systemd-confinement/checkperms.py +++ b/nixos/tests/systemd-confinement/checkperms.py @@ -13,6 +13,7 @@ class Accessibility(IntEnum): have within a confined service. Higher levels mean more permissions for the user and thus a bigger attack surface. 
""" + NONE = 0 # Directories can be listed or files can be read. @@ -45,7 +46,7 @@ def assert_on(self, path: Path) -> None: elif path.is_dir(): writable = True - dummy_file = path / 'can_i_write' + dummy_file = path / "can_i_write" try: dummy_file.touch() except OSError as e: @@ -68,7 +69,7 @@ def assert_on(self, path: Path) -> None: actual = self.READABLE elif path.is_file(): try: - with path.open('rb') as fp: + with path.open("rb") as fp: fp.read(1) actual = self.READABLE except PermissionError: @@ -76,8 +77,8 @@ def assert_on(self, path: Path) -> None: writable = True try: - with path.open('ab') as fp: - fp.write('x') + with path.open("ab") as fp: + fp.write("x") size = fp.tell() fp.truncate(size) except PermissionError: @@ -108,15 +109,17 @@ def assert_on(self, path: Path) -> None: if actual > self: stat = path.stat() - details = ', '.join([ - f'permissions: {stat.st_mode & 0o7777:o}', - f'uid: {stat.st_uid}', - f'group: {stat.st_gid}', - ]) + details = ", ".join( + [ + f"permissions: {stat.st_mode & 0o7777:o}", + f"uid: {stat.st_uid}", + f"group: {stat.st_gid}", + ] + ) raise AssertionError( - f'Expected at most {self!r} but got {actual!r} for path' - f' {path} ({details}).' + f"Expected at most {self!r} but got {actual!r} for path" + f" {path} ({details})." ) @@ -126,10 +129,10 @@ def is_special_fs(path: Path) -> bool: or sysfs. """ try: - if path == Path('/proc'): - return (path / 'version').read_text().startswith('Linux') - elif path == Path('/sys'): - return b'Linux' in (path / 'kernel' / 'notes').read_bytes() + if path == Path("/proc"): + return (path / "version").read_text().startswith("Linux") + elif path == Path("/sys"): + return b"Linux" in (path / "kernel" / "notes").read_bytes() except FileNotFoundError: pass return False @@ -152,7 +155,7 @@ def _assert_permissions_in_directory( for file in directory.iterdir(): if is_special_fs(file): - msg = f'Got unexpected special filesystem at {file}.' + msg = f"Got unexpected special filesystem at {file}." assert subdirs.pop(file) == Accessibility.SPECIAL, msg elif not file.is_symlink() and file.is_dir(): subdir_access = subdirs.pop(file, accessibility) @@ -175,7 +178,7 @@ def assert_permissions(subdirs: dict[str, Accessibility]) -> None: Recursively check whether the file system conforms to the accessibility specification we specified via 'subdirs'. """ - root = Path('/') + root = Path("/") absolute_subdirs = {root / p: a for p, a in subdirs.items()} _assert_permissions_in_directory( root, @@ -183,5 +186,5 @@ def assert_permissions(subdirs: dict[str, Accessibility]) -> None: absolute_subdirs, ) for file in absolute_subdirs.keys(): - msg = f'Expected {file} to exist, but it was nowwhere to be found.' + msg = f"Expected {file} to exist, but it was nowwhere to be found." 
raise AssertionError(msg) diff --git a/nixos/tests/systemd-confinement/concurrent-runner.py b/nixos/tests/systemd-confinement/concurrent-runner.py index 58cf9f7ff4d95..fa132e1a097ea 100644 --- a/nixos/tests/systemd-confinement/concurrent-runner.py +++ b/nixos/tests/systemd-confinement/concurrent-runner.py @@ -44,21 +44,23 @@ def client_actions(draw, size: int = 10): start = draw(st.integers(min_value=0, max_value=size - 2)) stop = draw(st.integers(min_value=start + 1, max_value=size - 1)) if start + 1 < stop: - runs = draw(st.sets( - st.integers(min_value=start + 1, max_value=stop - 1), - max_size=stop - start, - )) - - out = '' + runs = draw( + st.sets( + st.integers(min_value=start + 1, max_value=stop - 1), + max_size=stop - start, + ) + ) + + out = "" for index in range(size): if start is not None and index == start: - out += '[' + out += "[" elif stop is not None and index == stop: - out += ']' + out += "]" elif index in runs: - out += 'R' + out += "R" else: - out += ' ' + out += " " return out @@ -67,7 +69,7 @@ def cli() -> None: pass -@cli.command('driver') +@cli.command("driver") @settings(deadline=None, max_examples=20) @given(st.lists(client_actions(), max_size=5)) def test_driver(client_actions: list[str]) -> None: @@ -80,52 +82,51 @@ def test_driver(client_actions: list[str]) -> None: except IndexError: continue match action: - case '[': + case "[": client = socket.socket(socket.AF_INET6) client.settimeout(60) - client.connect(('::1', 12345)) - client.send(b'[') + client.connect(("::1", 12345)) + client.send(b"[") clients[n] = client - case ']': + case "]": assert client is not None - client.send(b']') + client.send(b"]") # At this point if we get ']' back from the client, we know # that everything went smoothly up to this point because # otherwise the client would have just thrown an exception # and the connection would be closed. 
- assert client.recv(1) == b']' + assert client.recv(1) == b"]" assert not client.recv(1) client.close() clients[n] = None - case 'R': + case "R": assert client is not None - client.send(b'R') - case ' ': + client.send(b"R") + case " ": if client is not None: - client.send(b' ') + client.send(b" ") sleep(0.1) - assert all(c is None for c in clients), \ - f'clients still running: {clients!r}' + assert all(c is None for c in clients), f"clients still running: {clients!r}" -@cli.command('client') -@click.argument('executable') +@cli.command("client") +@click.argument("executable") def test_client(executable: str) -> None: if not (action := sys.stdin.read(1)): raise SystemExit(1) - assert action == '[', f'{action!r} != "["' + assert action == "[", f'{action!r} != "["' while action := sys.stdin.read(1): match action: - case 'R': + case "R": run([executable], check=True, stdout=sys.stderr) - case ']': - sys.stdout.write(']') + case "]": + sys.stdout.write("]") return - case ' ': + case " ": sleep(0.1) - case '': + case "": raise SystemExit(1) -if __name__ == '__main__': +if __name__ == "__main__": cli() diff --git a/nixos/tests/ulogd/ulogd.py b/nixos/tests/ulogd/ulogd.py index 76a8d0c6e24a3..fa88959a8633d 100644 --- a/nixos/tests/ulogd/ulogd.py +++ b/nixos/tests/ulogd/ulogd.py @@ -11,20 +11,32 @@ machine.succeed("ping -f 127.0.0.1 -c 5 >&2") machine.succeed("sleep 2") machine.wait_until_succeeds("du /var/log/ulogd.pcap") - _, echo_request_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 8 and host 127.0.0.1") + _, echo_request_packets = machine.execute( + "tcpdump -r /var/log/ulogd.pcap icmp[0] == 8 and host 127.0.0.1" + ) expected, actual = 5 * 2, len(echo_request_packets.splitlines()) - assert expected == actual, f"Expected {expected} ICMP request packets from pcap, got: {actual}" - _, echo_reply_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 0 and host 127.0.0.1") + assert expected == actual, ( + f"Expected {expected} ICMP request packets from pcap, got: {actual}" + ) + _, echo_reply_packets = machine.execute( + "tcpdump -r /var/log/ulogd.pcap icmp[0] == 0 and host 127.0.0.1" + ) expected, actual = 5 * 2, len(echo_reply_packets.splitlines()) - assert expected == actual, f"Expected {expected} ICMP reply packets from pcap, got: {actual}" + assert expected == actual, ( + f"Expected {expected} ICMP reply packets from pcap, got: {actual}" + ) machine.wait_until_succeeds("du /var/log/ulogd_pkts.log") _, echo_request_packets = machine.execute("grep TYPE=8 /var/log/ulogd_pkts.log") expected, actual = 5 * 2, len(echo_request_packets.splitlines()) - assert expected == actual, f"Expected {expected} ICMP request packets from logfile, got: {actual}" + assert expected == actual, ( + f"Expected {expected} ICMP request packets from logfile, got: {actual}" + ) _, echo_reply_packets = machine.execute("grep TYPE=0 /var/log/ulogd_pkts.log") expected, actual = 5 * 2, len(echo_reply_packets.splitlines()) - assert expected == actual, f"Expected {expected} ICMP reply packets from logfile, got: {actual}" + assert expected == actual, ( + f"Expected {expected} ICMP reply packets from logfile, got: {actual}" + ) with subtest("Reloading service reopens log file"): machine.succeed("mv /var/log/ulogd.pcap /var/log/old_ulogd.pcap") @@ -33,17 +45,25 @@ machine.succeed("ping -f 127.0.0.1 -c 5 >&2") machine.succeed("sleep 2") machine.wait_until_succeeds("du /var/log/ulogd.pcap") - _, echo_request_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 8 and host 
127.0.0.1") + _, echo_request_packets = machine.execute( + "tcpdump -r /var/log/ulogd.pcap icmp[0] == 8 and host 127.0.0.1" + ) expected, actual = 5 * 2, len(echo_request_packets.splitlines()) assert expected == actual, f"Expected {expected} packets, got: {actual}" - _, echo_reply_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 0 and host 127.0.0.1") + _, echo_reply_packets = machine.execute( + "tcpdump -r /var/log/ulogd.pcap icmp[0] == 0 and host 127.0.0.1" + ) expected, actual = 5 * 2, len(echo_reply_packets.splitlines()) assert expected == actual, f"Expected {expected} packets, got: {actual}" machine.wait_until_succeeds("du /var/log/ulogd_pkts.log") _, echo_request_packets = machine.execute("grep TYPE=8 /var/log/ulogd_pkts.log") expected, actual = 5 * 2, len(echo_request_packets.splitlines()) - assert expected == actual, f"Expected {expected} ICMP request packets from logfile, got: {actual}" + assert expected == actual, ( + f"Expected {expected} ICMP request packets from logfile, got: {actual}" + ) _, echo_reply_packets = machine.execute("grep TYPE=0 /var/log/ulogd_pkts.log") expected, actual = 5 * 2, len(echo_reply_packets.splitlines()) - assert expected == actual, f"Expected {expected} ICMP reply packets from logfile, got: {actual}" + assert expected == actual, ( + f"Expected {expected} ICMP reply packets from logfile, got: {actual}" + ) diff --git a/nixos/tests/web-apps/netbox/testScript.py b/nixos/tests/web-apps/netbox/testScript.py index f6c2c9821e121..1c28e62913839 100644 --- a/nixos/tests/web-apps/netbox/testScript.py +++ b/nixos/tests/web-apps/netbox/testScript.py @@ -7,59 +7,57 @@ test_objects = { "sites": { - "test-site": { - "name": "Test site", - "slug": "test-site" - }, - "test-site-two": { - "name": "Test site 2", - "slug": "test-site-second-edition" - } + "test-site": {"name": "Test site", "slug": "test-site"}, + "test-site-two": {"name": "Test site 2", "slug": "test-site-second-edition"}, }, "prefixes": { "v4-with-updated-desc": { "prefix": "192.0.2.0/24", "class_type": "Prefix", - "family": { "label": "IPv4" }, + "family": {"label": "IPv4"}, "scope": { "__typename": "SiteType", "id": "1", - "description": "Test site description" - } + "description": "Test site description", + }, }, "v6-cidr-32": { "prefix": "2001:db8::/32", "class_type": "Prefix", - "family": { "label": "IPv6" }, + "family": {"label": "IPv6"}, "scope": { "__typename": "SiteType", "id": "1", - "description": "Test site description" - } + "description": "Test site description", + }, }, "v6-cidr-48": { "prefix": "2001:db8:c0fe::/48", "class_type": "Prefix", - "family": { "label": "IPv6" }, + "family": {"label": "IPv6"}, "scope": { "__typename": "SiteType", "id": "1", - "description": "Test site description" - } - } - } + "description": "Test site description", + }, + }, + }, } + def compare(a: str, b: str): - differences = [(x - y) for (x,y) in list(zip( - list(map(int, a.split('.'))), - list(map(int, b.split('.'))) - ))] + differences = [ + (x - y) + for (x, y) in list( + zip(list(map(int, a.split("."))), list(map(int, b.split(".")))) + ) + ] for d in differences: if d != 0: return d return 0 + with subtest("Home screen loads"): machine.wait_until_succeeds( "curl -sSfL http://[::1]:8001 | grep 'Home | NetBox'" @@ -78,14 +76,13 @@ def compare(a: str, b: str): machine.wait_for_unit("network.target") with subtest("Home screen loads from nginx"): - machine.succeed( - "curl -sSfL http://localhost | grep 'Home | NetBox'" - ) + machine.succeed("curl -sSfL http://localhost | grep 'Home | 
NetBox'") with subtest("Staticfiles can be fetched"): machine.succeed("curl -sSfL http://localhost/static/netbox.js") machine.succeed("curl -sSfL http://localhost/static/docs/") + def login(username: str, password: str): encoded_data = json.dumps({"username": username, "password": password}) uri = "/users/tokens/provision/" @@ -101,9 +98,11 @@ def login(username: str, password: str): ) return result["key"] + with subtest("Can login"): auth_token = login("netbox", "netbox") + def get(uri: str): return json.loads( machine.succeed( @@ -114,6 +113,7 @@ def get(uri: str): ) ) + def delete(uri: str): return machine.succeed( "curl -sSfL " @@ -138,12 +138,15 @@ def data_request(uri: str, method: str, data: Dict[str, Any]): ) ) + def post(uri: str, data: Dict[str, Any]): return data_request(uri, "POST", data) + def patch(uri: str, data: Dict[str, Any]): return data_request(uri, "PATCH", data) + # Retrieve netbox version netbox_version = get("/status/")["netbox-version"] @@ -151,24 +154,30 @@ def patch(uri: str, data: Dict[str, Any]): result = post("/dcim/sites/", {"name": "Test site", "slug": "test-site"}) site_id = result["id"] - for prefix in test_objects["prefixes"].values(): - if compare(netbox_version, '4.2.0') >= 0: - post("/ipam/prefixes/", { - "prefix": prefix["prefix"], - "scope_id": site_id, - "scope_type": "dcim." + prefix["scope"]["__typename"].replace("Type", "").lower() - }) + if compare(netbox_version, "4.2.0") >= 0: + post( + "/ipam/prefixes/", + { + "prefix": prefix["prefix"], + "scope_id": site_id, + "scope_type": "dcim." + + prefix["scope"]["__typename"].replace("Type", "").lower(), + }, + ) prefix["scope"]["id"] = str(site_id) else: - post("/ipam/prefixes/", { - "prefix": prefix["prefix"], - "site": str(site_id), - }) + post( + "/ipam/prefixes/", + { + "prefix": prefix["prefix"], + "site": str(site_id), + }, + ) result = post( "/dcim/manufacturers/", - {"name": "Test manufacturer", "slug": "test-manufacturer"} + {"name": "Test manufacturer", "slug": "test-manufacturer"}, ) manufacturer_id = result["id"] @@ -209,6 +218,7 @@ def patch(uri: str, data: Dict[str, Any]): result = get("/dcim/device-types/") assert result["count"] == 0 + def request_graphql(query: str): return machine.succeed( "curl -sSfL " @@ -216,13 +226,13 @@ def request_graphql(query: str): "-H 'Content-Type: application/json' " f"-H 'Authorization: Token {auth_token}' " "'http://localhost/graphql/' " - f"--data '{json.dumps({"query": query})}'" + f"--data '{json.dumps({'query': query})}'" ) -if compare(netbox_version, '4.2.0') >= 0: +if compare(netbox_version, "4.2.0") >= 0: with subtest("Can use the GraphQL API (NetBox 4.2.0+)"): - graphql_query = '''query { + graphql_query = """query { prefix_list { prefix class_type @@ -238,18 +248,21 @@ def request_graphql(query: str): } } } - ''' + """ answer = request_graphql(graphql_query) result = json.loads(answer) assert len(result["data"]["prefix_list"]) == 3 - assert test_objects["prefixes"]["v4-with-updated-desc"] in result["data"]["prefix_list"] + assert ( + test_objects["prefixes"]["v4-with-updated-desc"] + in result["data"]["prefix_list"] + ) assert test_objects["prefixes"]["v6-cidr-32"] in result["data"]["prefix_list"] assert test_objects["prefixes"]["v6-cidr-48"] in result["data"]["prefix_list"] -if compare(netbox_version, '4.2.0') < 0: +if compare(netbox_version, "4.2.0") < 0: with subtest("Can use the GraphQL API (Netbox <= 4.2.0)"): - answer = request_graphql('''query { + answer = request_graphql("""query { prefix_list { prefix site { @@ -257,11 +270,16 @@ def 
request_graphql(query: str): } } } - ''') + """) result = json.loads(answer) print(result["data"]["prefix_list"][0]) - assert result["data"]["prefix_list"][0]["prefix"] == test_objects["prefixes"]["v4-with-updated-desc"]["prefix"] - assert int(result["data"]["prefix_list"][0]["site"]["id"]) == int(test_objects["prefixes"]["v4-with-updated-desc"]["scope"]["id"]) + assert ( + result["data"]["prefix_list"][0]["prefix"] + == test_objects["prefixes"]["v4-with-updated-desc"]["prefix"] + ) + assert int(result["data"]["prefix_list"][0]["site"]["id"]) == int( + test_objects["prefixes"]["v4-with-updated-desc"]["scope"]["id"] + ) with subtest("Can login with LDAP"): machine.wait_for_unit("openldap.service") @@ -271,4 +289,6 @@ def request_graphql(query: str): result = get("/users/users/?username=${testUser}") assert result["count"] == 1 - assert any(group["name"] == "${testGroup}" for group in result["results"][0]["groups"]) + assert any( + group["name"] == "${testGroup}" for group in result["results"][0]["groups"] + ) diff --git a/nixos/tests/windmill/api-integration.py b/nixos/tests/windmill/api-integration.py index fd1112eb403f4..9ff1c723bd6a9 100755 --- a/nixos/tests/windmill/api-integration.py +++ b/nixos/tests/windmill/api-integration.py @@ -9,12 +9,33 @@ import time parser = ArgumentParser() -parser.add_argument("-l", "--language", dest="language", type=str, - help="Name of the scripting language", metavar="LANG", required=True) -parser.add_argument("-s", "--script", dest="content_file", type=pathlib.Path, - help="read script contents from FILE", metavar="FILE", required=True) -parser.add_argument("-i", "--input", dest="input_file", type=pathlib.Path, - help="read script arguments from FILE", metavar="FILE", required=True) +parser.add_argument( + "-l", + "--language", + dest="language", + type=str, + help="Name of the scripting language", + metavar="LANG", + required=True, +) +parser.add_argument( + "-s", + "--script", + dest="content_file", + type=pathlib.Path, + help="read script contents from FILE", + metavar="FILE", + required=True, +) +parser.add_argument( + "-i", + "--input", + dest="input_file", + type=pathlib.Path, + help="read script arguments from FILE", + metavar="FILE", + required=True, +) args = parser.parse_args() @@ -31,8 +52,8 @@ login_req = Request( "http://localhost:8001/api/auth/login", method="POST", - headers={'Content-Type': 'application/json'}, - data=json.dumps(login_form).encode('utf-8') + headers={"Content-Type": "application/json"}, + data=json.dumps(login_form).encode("utf-8"), ) with http_client.open(login_req) as response: assert 200 == response.status, f"Failure {response.status}: Superuser login" @@ -50,22 +71,24 @@ python_version_req = Request( "http://localhost:8001/api/settings/global/instance_python_version", method="GET", - headers={'Authorization': f'Bearer {admin_token}'}, - data=json.dumps(python_version_form).encode('utf-8') + headers={"Authorization": f"Bearer {admin_token}"}, + data=json.dumps(python_version_form).encode("utf-8"), ) with http_client.open(python_version_req) as response: - assert 200 == response.status, f"Failure {response.status}: Update global instance python version." + assert 200 == response.status, ( + f"Failure {response.status}: Update global instance python version." 
+ ) workspace_req = Request( "http://localhost:8001/api/workspaces/list_as_superadmin", method="GET", - headers={'Authorization': f'Bearer {admin_token}'} + headers={"Authorization": f"Bearer {admin_token}"}, ) with http_client.open(workspace_req) as response: assert 200 == response.status, f"Failure {response.status}: List workspaces" - workspace_list = json.loads(response.read().decode('utf-8')) - assert any(workspace['id'] == id_admin_workspace for workspace in workspace_list) + workspace_list = json.loads(response.read().decode("utf-8")) + assert any(workspace["id"] == id_admin_workspace for workspace in workspace_list) script_hash = None @@ -74,20 +97,22 @@ "summary": f"Test {args.language}", "description": "", "language": args.language, - "content": args.content_file.read_text() + "content": args.content_file.read_text(), } script_request = Request( f"http://localhost:8001/api/w/{id_admin_workspace}/scripts/create", method="POST", headers={ - 'Content-Type': 'application/json', - 'Authorization': f'Bearer {admin_token}', + "Content-Type": "application/json", + "Authorization": f"Bearer {admin_token}", }, - data=json.dumps(script_form).encode('utf-8') + data=json.dumps(script_form).encode("utf-8"), ) with http_client.open(script_request) as response: - assert 201 == response.status, f"Failure {response.status}: Create {args.language} script" - script_hash = response.read().decode('utf-8') + assert 201 == response.status, ( + f"Failure {response.status}: Create {args.language} script" + ) + script_hash = response.read().decode("utf-8") assert script_hash, "Failed to receive an identifier from script creation." # NOTE; Some languages require dependencies and the depenceny collection tasks take some time to complete @@ -100,8 +125,10 @@ ) with http_client.open(script_request) as response: try: - assert 200 == response.status, f"Failure {response.status}: Retrieve {args.language} deployment status" - script_metadata = json.loads(response.read().decode('utf-8')) + assert 200 == response.status, ( + f"Failure {response.status}: Retrieve {args.language} deployment status" + ) + script_metadata = json.loads(response.read().decode("utf-8")) # exists_lock_error = bool(script_metadata["lock_error_logs"]) assert not exists_lock_error, "Script deployment did not succeed" @@ -118,14 +145,16 @@ f"http://localhost:8001/api/w/{id_admin_workspace}/jobs/run/h/{script_hash}", method="POST", headers={ - 'Content-Type': 'application/json', - 'Authorization': f'Bearer {admin_token}', + "Content-Type": "application/json", + "Authorization": f"Bearer {admin_token}", }, data=args.input_file.read_bytes(), ) with http_client.open(request) as response: - assert 201 == response.status, f"Failure {response.status}: Run {args.language} script" - job_id = response.read().decode('utf-8') + assert 201 == response.status, ( + f"Failure {response.status}: Run {args.language} script" + ) + job_id = response.read().decode("utf-8") assert job_id, "Failed to receive an identifier from job creation/scheduling." 
@@ -140,14 +169,14 @@ f"http://localhost:8001/api/w/{id_admin_workspace}/jobs/completed/list", method="GET", headers={ - 'Authorization': f'Bearer {admin_token}', + "Authorization": f"Bearer {admin_token}", }, ) with http_client.open(retrieve_jobs_req) as response: assert 200 == response.status, f"Failure {response.status}: Retrieve jobs" - job_results = json.loads(response.read().decode('utf-8')) + job_results = json.loads(response.read().decode("utf-8")) for id in set(started_jobs): # Must create copy of set being iterated over - if any(job['id'] == id for job in job_results if bool(job['success'])): + if any(job["id"] == id for job in job_results if bool(job["success"])): started_jobs.remove(id) if any(started_jobs): diff --git a/pkgs/applications/editors/jetbrains/bin/update_bin.py b/pkgs/applications/editors/jetbrains/bin/update_bin.py index 9bea317315896..17a11ab3c0bfd 100755 --- a/pkgs/applications/editors/jetbrains/bin/update_bin.py +++ b/pkgs/applications/editors/jetbrains/bin/update_bin.py @@ -53,7 +53,7 @@ def download_sha256(url): url = f"{url}.sha256" download_response = requests.get(url) download_response.raise_for_status() - return download_response.content.decode('UTF-8').split(' ')[0] + return download_response.content.decode("UTF-8").split(" ")[0] channels = download_channels() @@ -78,7 +78,11 @@ def update_product(name, product): channel = channels.get(update_channel) if channel is None: logging.error("Failed to find channel %s.", update_channel) - logging.error("Check that the update-channel in %s matches the name in %s", versions_file_path, updates_url) + logging.error( + "Check that the update-channel in %s matches the name in %s", + versions_file_path, + updates_url, + ) else: try: build = latest_build(channel) @@ -92,20 +96,35 @@ def update_product(name, product): version_or_build_number = new_version else: version_or_build_number = new_build_number - version_number = new_version.split(' ')[0] - download_url = get_url(product["url-template"], version_or_build_number, version_number) + version_number = new_version.split(" ")[0] + download_url = get_url( + product["url-template"], version_or_build_number, version_number + ) if not download_url: - raise Exception(f"No valid url for {name} version {version_or_build_number}") + raise Exception( + f"No valid url for {name} version {version_or_build_number}" + ) product["url"] = download_url - if "sha256" not in product or product.get("build_number") != new_build_number: + if ( + "sha256" not in product + or product.get("build_number") != new_build_number + ): fromVersions[name] = product["version"] toVersions[name] = new_version - logging.info("Found a newer version %s with build number %s.", new_version, new_build_number) + logging.info( + "Found a newer version %s with build number %s.", + new_version, + new_build_number, + ) product["version"] = new_version product["build_number"] = new_build_number product["sha256"] = download_sha256(download_url) else: - logging.info("Already at the latest version %s with build number %s.", new_version, new_build_number) + logging.info( + "Already at the latest version %s with build number %s.", + new_version, + new_build_number, + ) except Exception as e: logging.exception("Update failed:", exc_info=e) logging.warning("Skipping %s due to the above error.", name) @@ -144,7 +163,9 @@ def update_products(products): # Commit the result logging.info("#### Committing changes... 
####") -subprocess.run(['git', 'commit', f'-m{commitMessage}', '--', f'{versions_file_path}'], check=True) +subprocess.run( + ["git", "commit", f"-m{commitMessage}", "--", f"{versions_file_path}"], check=True +) logging.info("#### Updating plugins ####") plugin_script = current_path.joinpath("../plugins/update_plugins.py").resolve() diff --git a/pkgs/applications/editors/jetbrains/plugins/update_plugins.py b/pkgs/applications/editors/jetbrains/plugins/update_plugins.py index ff31cec97e1b4..d1c52e3031192 100755 --- a/pkgs/applications/editors/jetbrains/plugins/update_plugins.py +++ b/pkgs/applications/editors/jetbrains/plugins/update_plugins.py @@ -10,14 +10,23 @@ # Token priorities for version checking # From https://github.com/JetBrains/intellij-community/blob/94f40c5d77f60af16550f6f78d481aaff8deaca4/platform/util-rt/src/com/intellij/util/text/VersionComparatorUtil.java#L50 TOKENS = { - "snap": 10, "snapshot": 10, + "snap": 10, + "snapshot": 10, "m": 20, - "eap": 25, "pre": 25, "preview": 25, - "alpha": 30, "a": 30, - "beta": 40, "betta": 40, "b": 40, + "eap": 25, + "pre": 25, + "preview": 25, + "alpha": 30, + "a": 30, + "beta": 40, + "betta": 40, + "b": 40, "rc": 50, "sp": 70, - "rel": 80, "release": 80, "r": 80, "final": 80 + "rel": 80, + "release": 80, + "r": 80, + "final": 80, } SNAPSHOT_VALUE = 99999 PLUGINS_FILE = Path(__file__).parent.joinpath("plugins.json").resolve() @@ -38,7 +47,7 @@ "rider": "RIDER", "ruby-mine": "RUBYMINE", "rust-rover": "RUST", - "webstorm": "WEBSTORM" + "webstorm": "WEBSTORM", } PLUGIN_TO_FRIENDLY = {j: i for i, j in FRIENDLY_TO_PLUGIN.items()} @@ -58,7 +67,6 @@ def split(version_string: str): prev_type = None block = "" for char in version_string: - if char.isdigit(): cur_type = "number" elif char.isalpha(): @@ -117,23 +125,32 @@ def is_build_older(ver1: str, ver2: str) -> int: def is_compatible(build, since, until) -> bool: - return (not since or is_build_older(since, build) < 0) and (not until or 0 < is_build_older(until, build)) + return (not since or is_build_older(since, build) < 0) and ( + not until or 0 < is_build_older(until, build) + ) -def get_newest_compatible(pid: str, build: str, plugin_infos: dict, quiet: bool) -> [None, str]: +def get_newest_compatible( + pid: str, build: str, plugin_infos: dict, quiet: bool +) -> [None, str]: newest_ver = None newest_index = None for index, info in enumerate(plugin_infos): - if pick_newest(newest_ver, info["version"]) != newest_ver and \ - is_compatible(build, info["since"], info["until"]): + if pick_newest(newest_ver, info["version"]) != newest_ver and is_compatible( + build, info["since"], info["until"] + ): newest_ver = info["version"] newest_index = index if newest_ver is not None: - return "https://plugins.jetbrains.com/files/" + plugin_infos[newest_index]["file"] + return ( + "https://plugins.jetbrains.com/files/" + plugin_infos[newest_index]["file"] + ) else: if not quiet: - print(f"WARNING: Could not find version of plugin {pid} compatible with build {build}") + print( + f"WARNING: Could not find version of plugin {pid} compatible with build {build}" + ) return None @@ -163,29 +180,38 @@ def make_name_mapping(infos: dict) -> dict[str, str]: return sort_dict({i: id_to_name(*i.split("-", 1)) for i in infos.keys()}) -def make_plugin_files(plugin_infos: dict, ide_versions: dict, quiet: bool, extra_builds: list[str]) -> dict: +def make_plugin_files( + plugin_infos: dict, ide_versions: dict, quiet: bool, extra_builds: list[str] +) -> dict: result = {} names = make_name_mapping(plugin_infos) 
for pid in plugin_infos: plugin_versions = { "compatible": get_compatible_ides(pid), "builds": {}, - "name": names[pid] + "name": names[pid], } relevant_builds = [ - builds for ide, builds - in ide_versions.items() + builds + for ide, builds in ide_versions.items() if ( ide in plugin_versions["compatible"] # TODO: Remove this once we removed pycharm-community - or (ide == "pycharm-community" and "pycharm" in plugin_versions["compatible"]) + or ( + ide == "pycharm-community" + and "pycharm" in plugin_versions["compatible"] + ) # TODO: Remove this once we removed idea-community or (ide == "idea-community" and "idea" in plugin_versions["compatible"]) ) ] + [extra_builds] - relevant_builds = sorted(list(set(flatten(relevant_builds)))) # Flatten, remove duplicates and sort + relevant_builds = sorted( + list(set(flatten(relevant_builds))) + ) # Flatten, remove duplicates and sort for build in relevant_builds: - plugin_versions["builds"][build] = get_newest_compatible(pid, build, plugin_infos[pid], quiet) + plugin_versions["builds"][build] = get_newest_compatible( + pid, build, plugin_infos[pid], quiet + ) result[pid] = plugin_versions return result @@ -204,7 +230,10 @@ def get_hash(url): args.append("--executable") path_process = run(args, capture_output=True) path = path_process.stdout.decode().split("\n")[1] - result = run(["nix", "--extra-experimental-features", "nix-command", "hash", "path", path], capture_output=True) + result = run( + ["nix", "--extra-experimental-features", "nix-command", "hash", "path", path], + capture_output=True, + ) result_contents = result.stdout.decode()[:-1] if not result_contents: raise RuntimeError(result.stderr.decode()) @@ -254,20 +283,36 @@ def get_args() -> tuple[list[str], list[str], bool, bool, bool, list[str]]: parser = ArgumentParser( description="Add/remove/update entries in plugins.json", epilog="To update all plugins, run with no args.\n" - "To add a version of a plugin from a different channel, append -[channel] to the id.\n" - "The id of a plugin is the number before the name in the address of its page on https://plugins.jetbrains.com/" + "To add a version of a plugin from a different channel, append -[channel] to the id.\n" + "The id of a plugin is the number before the name in the address of its page on https://plugins.jetbrains.com/", + ) + parser.add_argument( + "-r", + "--refetch-all", + action="store_true", + help="don't use previously collected hashes, redownload all", + ) + parser.add_argument("-l", "--list", action="store_true", help="list plugin ids") + parser.add_argument( + "-q", + "--quiet", + action="store_true", + help="suppress warnings about not being able to find compatible plugin versions", + ) + parser.add_argument( + "-w", + "--with-build", + action="append", + default=[], + help="append [builds] to the list of builds to fetch plugin versions for", ) - parser.add_argument("-r", "--refetch-all", action="store_true", - help="don't use previously collected hashes, redownload all") - parser.add_argument("-l", "--list", action="store_true", - help="list plugin ids") - parser.add_argument("-q", "--quiet", action="store_true", - help="suppress warnings about not being able to find compatible plugin versions") - parser.add_argument("-w", "--with-build", action="append", default=[], - help="append [builds] to the list of builds to fetch plugin versions for") sub = parser.add_subparsers(dest="action") - sub.add_parser("add").add_argument("ids", type=str, nargs="+", help="plugin(s) to add") - sub.add_parser("remove").add_argument("ids", 
type=str, nargs="+", help="plugin(s) to remove") + sub.add_parser("add").add_argument( + "ids", type=str, nargs="+", help="plugin(s) to add" + ) + sub.add_parser("remove").add_argument( + "ids", type=str, nargs="+", help="plugin(s) to remove" + ) args = parser.parse_args() add = [] @@ -315,7 +360,10 @@ def get_plugin_info(pid: str, channel: str) -> dict: decoded = resp.json() if resp.status_code != 200: - print(f"Server gave non-200 code {resp.status_code} with message " + decoded["message"]) + print( + f"Server gave non-200 code {resp.status_code} with message " + + decoded["message"] + ) exit(1) return decoded @@ -393,7 +441,9 @@ def main(): print("Working out which plugins need which files") ide_versions = get_ide_versions() - result["plugins"] = make_plugin_files(plugin_infos, ide_versions, quiet, extra_builds) + result["plugins"] = make_plugin_files( + plugin_infos, ide_versions, quiet, extra_builds + ) print("Getting file hashes") file_list = get_file_names(result["plugins"]) @@ -404,8 +454,8 @@ def main(): # Commit the result commitMessage = "jetbrains.plugins: update" print("#### Committing changes... ####") - run(['git', 'commit', f'-m{commitMessage}', '--', f'{PLUGINS_FILE}'], check=True) + run(["git", "commit", f"-m{commitMessage}", "--", f"{PLUGINS_FILE}"], check=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pkgs/applications/editors/jetbrains/source/build_maven.py b/pkgs/applications/editors/jetbrains/source/build_maven.py index 189ec3f49c4b6..a9a6279d01a85 100755 --- a/pkgs/applications/editors/jetbrains/source/build_maven.py +++ b/pkgs/applications/editors/jetbrains/source/build_maven.py @@ -6,6 +6,7 @@ from json import dump from sys import stdout + def get_args() -> (str, list[str]): parser = ArgumentParser( description="Given the path of a intellij source tree, make a list of urls and hashes of maven artefacts required to build" @@ -21,26 +22,37 @@ def ensure_is_list(x): return [x] return x + def add_entries(sources, targets, hashes): for num, artefact in enumerate(sources): - hashes.append({ - "url": artefact["@url"][26:], - "hash": artefact["sha256sum"], - "path": targets[num]["@url"][25:-2] - }) - - -def add_libraries(root_path: str, hashes: list[dict[str, str]], projects_to_process: list[str]): + hashes.append( + { + "url": artefact["@url"][26:], + "hash": artefact["sha256sum"], + "path": targets[num]["@url"][25:-2], + } + ) + + +def add_libraries( + root_path: str, hashes: list[dict[str, str]], projects_to_process: list[str] +): library_paths = os.listdir(root_path + "/libraries/") for path in library_paths: file_contents = parse(open(root_path + "/libraries/" + path).read()) if "properties" not in file_contents["component"]["library"]: continue - sources = ensure_is_list(file_contents["component"]["library"]["properties"]["verification"]["artifact"]) - targets = ensure_is_list(file_contents["component"]["library"]["CLASSES"]["root"]) + sources = ensure_is_list( + file_contents["component"]["library"]["properties"]["verification"][ + "artifact" + ] + ) + targets = ensure_is_list( + file_contents["component"]["library"]["CLASSES"]["root"] + ) add_entries(sources, targets, hashes) - modules_xml = parse(open(root_path+"/modules.xml").read()) + modules_xml = parse(open(root_path + "/modules.xml").read()) for module in modules_xml["project"]["component"]["modules"]["module"]: projects_to_process.append(module["@filepath"]) @@ -49,19 +61,25 @@ def add_iml(path: str, hashes: list[dict[str, str]], projects_to_process: list[s try: contents = 
parse(open(path).read()) except FileNotFoundError: - print(f"Warning: path {path} does not exist (did you forget the android directory?)") + print( + f"Warning: path {path} does not exist (did you forget the android directory?)" + ) return for manager in ensure_is_list(contents["module"]["component"]): if manager["@name"] != "NewModuleRootManager": continue for entry in manager["orderEntry"]: - if type(entry) != dict or \ - entry["@type"] != "module-library" or \ - "properties" not in entry["library"]: + if ( + type(entry) != dict + or entry["@type"] != "module-library" + or "properties" not in entry["library"] + ): continue - sources = ensure_is_list(entry["library"]["properties"]["verification"]["artifact"]) + sources = ensure_is_list( + entry["library"]["properties"]["verification"]["artifact"] + ) targets = ensure_is_list(entry["library"]["CLASSES"]["root"]) add_entries(sources, targets, hashes) @@ -69,7 +87,7 @@ def add_iml(path: str, hashes: list[dict[str, str]], projects_to_process: list[s def main(): root_path, out = get_args() file_hashes = [] - projects_to_process: list[str] = [root_path+"/.idea"] + projects_to_process: list[str] = [root_path + "/.idea"] while projects_to_process: elem = projects_to_process.pop() @@ -87,5 +105,5 @@ def main(): file.write("\n") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pkgs/applications/editors/jetbrains/source/update.py b/pkgs/applications/editors/jetbrains/source/update.py index 08358f8c7c778..044a29b707b73 100755 --- a/pkgs/applications/editors/jetbrains/source/update.py +++ b/pkgs/applications/editors/jetbrains/source/update.py @@ -8,8 +8,14 @@ from json import dump, loads from sys import stdout + def convert_hash_to_sri(base32: str) -> str: - result = subprocess.run(["nix-hash", "--to-sri", "--type", "sha256", base32], capture_output=True, check=True, text=True) + result = subprocess.run( + ["nix-hash", "--to-sri", "--type", "sha256", base32], + capture_output=True, + check=True, + text=True, + ) return result.stdout.strip() @@ -22,50 +28,74 @@ def ensure_is_list(x): def jar_repositories(root_path: str) -> list[str]: repositories = [] file_contents = parse(open(root_path + "/.idea/jarRepositories.xml").read()) - component = file_contents['project']['component'] - if component['@name'] != 'RemoteRepositoriesConfiguration': + component = file_contents["project"]["component"] + if component["@name"] != "RemoteRepositoriesConfiguration": return repositories - options = component['remote-repository'] + options = component["remote-repository"] for option in ensure_is_list(options): - for item in option['option']: - if item['@name'] == 'url': - repositories.append(item['@value']) + for item in option["option"]: + if item["@name"] == "url": + repositories.append(item["@value"]) return repositories def kotlin_jps_plugin_info(root_path: str) -> (str, str): file_contents = parse(open(root_path + "/.idea/kotlinc.xml").read()) - components = file_contents['project']['component'] + components = file_contents["project"]["component"] for component in components: - if component['@name'] != 'KotlinJpsPluginSettings': + if component["@name"] != "KotlinJpsPluginSettings": continue - option = component['option'] - version = option['@value'] + option = component["option"] + version = option["@value"] print(f"* Prefetching Kotlin JPS Plugin version {version}...") - prefetch = subprocess.run(["nix-prefetch-url", "--type", "sha256", 
f"https://cache-redirector.jetbrains.com/maven.pkg.jetbrains.space/kotlin/p/kotlin/kotlin-ide-plugin-dependencies/org/jetbrains/kotlin/kotlin-jps-plugin-classpath/{version}/kotlin-jps-plugin-classpath-{version}.jar"], capture_output=True, check=True, text=True) + prefetch = subprocess.run( + [ + "nix-prefetch-url", + "--type", + "sha256", + f"https://cache-redirector.jetbrains.com/maven.pkg.jetbrains.space/kotlin/p/kotlin/kotlin-ide-plugin-dependencies/org/jetbrains/kotlin/kotlin-jps-plugin-classpath/{version}/kotlin-jps-plugin-classpath-{version}.jar", + ], + capture_output=True, + check=True, + text=True, + ) return (version, convert_hash_to_sri(prefetch.stdout.strip())) def requested_kotlinc_version(root_path: str) -> str: file_contents = parse(open(root_path + "/.idea/kotlinc.xml").read()) - components = file_contents['project']['component'] + components = file_contents["project"]["component"] for component in components: - if component['@name'] != 'KotlinJpsPluginSettings': + if component["@name"] != "KotlinJpsPluginSettings": continue - option = component['option'] - version = option['@value'] + option = component["option"] + version = option["@value"] return version def prefetch_intellij_community(variant: str, buildNumber: str) -> (str, str): print("* Prefetching IntelliJ community source code...") - prefetch = subprocess.run(["nix-prefetch-url", "--print-path", "--unpack", "--name", "source", "--type", "sha256", f"https://github.com/jetbrains/intellij-community/archive/{variant}/{buildNumber}.tar.gz"], capture_output=True, check=True, text=True) + prefetch = subprocess.run( + [ + "nix-prefetch-url", + "--print-path", + "--unpack", + "--name", + "source", + "--type", + "sha256", + f"https://github.com/jetbrains/intellij-community/archive/{variant}/{buildNumber}.tar.gz", + ], + capture_output=True, + check=True, + text=True, + ) parts = prefetch.stdout.strip().split() hash = convert_hash_to_sri(parts[0]) @@ -76,7 +106,20 @@ def prefetch_intellij_community(variant: str, buildNumber: str) -> (str, str): def prefetch_android(variant: str, buildNumber: str) -> str: print("* Prefetching Android plugin source code...") - prefetch = subprocess.run(["nix-prefetch-url", "--unpack", "--name", "source", "--type", "sha256", f"https://github.com/jetbrains/android/archive/{variant}/{buildNumber}.tar.gz"], capture_output=True, check=True, text=True) + prefetch = subprocess.run( + [ + "nix-prefetch-url", + "--unpack", + "--name", + "source", + "--type", + "sha256", + f"https://github.com/jetbrains/android/archive/{variant}/{buildNumber}.tar.gz", + ], + capture_output=True, + check=True, + text=True, + ) return convert_hash_to_sri(prefetch.stdout.strip()) @@ -93,38 +136,54 @@ def get_args() -> (str, str): def main(): versions_path, out = get_args() versions = loads(open(versions_path).read()) - idea_data = versions['x86_64-linux']['idea-oss'] - pycharm_data = versions['x86_64-linux']['pycharm-oss'] - - result = { 'idea-oss': {}, 'pycharm-oss': {} } - result['idea-oss']['version'] = idea_data['version'] - result['idea-oss']['buildNumber'] = idea_data['build_number'] - result['idea-oss']['buildType'] = 'idea' - result['pycharm-oss']['version'] = pycharm_data['version'] - result['pycharm-oss']['buildNumber'] = pycharm_data['build_number'] - result['pycharm-oss']['buildType'] = 'pycharm' - print('Fetching IDEA info...') - result['idea-oss']['ideaHash'], ideaOutPath = prefetch_intellij_community('idea', result['idea-oss']['buildNumber']) - 
result['idea-oss']['androidHash'] = prefetch_android('idea', result['idea-oss']['buildNumber']) - result['idea-oss']['jpsHash'] = '' - result['idea-oss']['restarterHash'] = '' - result['idea-oss']['mvnDeps'] = 'idea_maven_artefacts.json' - result['idea-oss']['repositories'] = jar_repositories(ideaOutPath) - result['idea-oss']['kotlin-jps-plugin'] = {} - result['idea-oss']['kotlin-jps-plugin']['version'], result['idea-oss']['kotlin-jps-plugin']['hash'] = kotlin_jps_plugin_info(ideaOutPath) + idea_data = versions["x86_64-linux"]["idea-oss"] + pycharm_data = versions["x86_64-linux"]["pycharm-oss"] + + result = {"idea-oss": {}, "pycharm-oss": {}} + result["idea-oss"]["version"] = idea_data["version"] + result["idea-oss"]["buildNumber"] = idea_data["build_number"] + result["idea-oss"]["buildType"] = "idea" + result["pycharm-oss"]["version"] = pycharm_data["version"] + result["pycharm-oss"]["buildNumber"] = pycharm_data["build_number"] + result["pycharm-oss"]["buildType"] = "pycharm" + print("Fetching IDEA info...") + result["idea-oss"]["ideaHash"], ideaOutPath = prefetch_intellij_community( + "idea", result["idea-oss"]["buildNumber"] + ) + result["idea-oss"]["androidHash"] = prefetch_android( + "idea", result["idea-oss"]["buildNumber"] + ) + result["idea-oss"]["jpsHash"] = "" + result["idea-oss"]["restarterHash"] = "" + result["idea-oss"]["mvnDeps"] = "idea_maven_artefacts.json" + result["idea-oss"]["repositories"] = jar_repositories(ideaOutPath) + result["idea-oss"]["kotlin-jps-plugin"] = {} + ( + result["idea-oss"]["kotlin-jps-plugin"]["version"], + result["idea-oss"]["kotlin-jps-plugin"]["hash"], + ) = kotlin_jps_plugin_info(ideaOutPath) kotlinc_version = requested_kotlinc_version(ideaOutPath) print(f"* Prefetched IDEA Open Source requested Kotlin compiler {kotlinc_version}") - print('Fetching PyCharm info...') - result['pycharm-oss']['ideaHash'], pycharmOutPath = prefetch_intellij_community('pycharm', result['pycharm-oss']['buildNumber']) - result['pycharm-oss']['androidHash'] = prefetch_android('pycharm', result['pycharm-oss']['buildNumber']) - result['pycharm-oss']['jpsHash'] = '' - result['pycharm-oss']['restarterHash'] = '' - result['pycharm-oss']['mvnDeps'] = 'pycharm_maven_artefacts.json' - result['pycharm-oss']['repositories'] = jar_repositories(pycharmOutPath) - result['pycharm-oss']['kotlin-jps-plugin'] = {} - result['pycharm-oss']['kotlin-jps-plugin']['version'], result['pycharm-oss']['kotlin-jps-plugin']['hash'] = kotlin_jps_plugin_info(pycharmOutPath) + print("Fetching PyCharm info...") + result["pycharm-oss"]["ideaHash"], pycharmOutPath = prefetch_intellij_community( + "pycharm", result["pycharm-oss"]["buildNumber"] + ) + result["pycharm-oss"]["androidHash"] = prefetch_android( + "pycharm", result["pycharm-oss"]["buildNumber"] + ) + result["pycharm-oss"]["jpsHash"] = "" + result["pycharm-oss"]["restarterHash"] = "" + result["pycharm-oss"]["mvnDeps"] = "pycharm_maven_artefacts.json" + result["pycharm-oss"]["repositories"] = jar_repositories(pycharmOutPath) + result["pycharm-oss"]["kotlin-jps-plugin"] = {} + ( + result["pycharm-oss"]["kotlin-jps-plugin"]["version"], + result["pycharm-oss"]["kotlin-jps-plugin"]["hash"], + ) = kotlin_jps_plugin_info(pycharmOutPath) kotlinc_version = requested_kotlinc_version(pycharmOutPath) - print(f"* Prefetched PyCharm Open Source requested Kotlin compiler {kotlinc_version}") + print( + f"* Prefetched PyCharm Open Source requested Kotlin compiler {kotlinc_version}" + ) if out == "stdout": dump(result, stdout, indent=2) @@ -134,5 +193,5 @@ def 
main(): file.write("\n") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pkgs/applications/editors/kakoune/plugins/update.py b/pkgs/applications/editors/kakoune/plugins/update.py index 169304c9aa4e2..253cb9e0c7b3d 100755 --- a/pkgs/applications/editors/kakoune/plugins/update.py +++ b/pkgs/applications/editors/kakoune/plugins/update.py @@ -53,7 +53,9 @@ class KakouneEditor(nixpkgs_plugin_update.Editor): def generate_nix( self, - plugins: list[tuple[nixpkgs_plugin_update.PluginDesc, nixpkgs_plugin_update.Plugin]], + plugins: list[ + tuple[nixpkgs_plugin_update.PluginDesc, nixpkgs_plugin_update.Plugin] + ], outfile: str, ): with open(outfile, "w+") as f: diff --git a/pkgs/applications/editors/vim/plugins/utils/nvim-treesitter/update.py b/pkgs/applications/editors/vim/plugins/utils/nvim-treesitter/update.py index debdedac3b482..e5437be149e3d 100755 --- a/pkgs/applications/editors/vim/plugins/utils/nvim-treesitter/update.py +++ b/pkgs/applications/editors/vim/plugins/utils/nvim-treesitter/update.py @@ -33,9 +33,7 @@ def generate_grammar(lang, parser_info): version = "0.0.0+rev={rev[:7]}"; src = """ - generated += subprocess.check_output( - ["nurl", url, rev, "--indent=4"], text=True - ) + generated += subprocess.check_output(["nurl", url, rev, "--indent=4"], text=True) generated += ";" location = install_info.get("location", "") diff --git a/pkgs/applications/editors/vim/plugins/utils/update.py b/pkgs/applications/editors/vim/plugins/utils/update.py index c167f0a92386a..1d7c8ecd7938d 100755 --- a/pkgs/applications/editors/vim/plugins/utils/update.py +++ b/pkgs/applications/editors/vim/plugins/utils/update.py @@ -36,9 +36,7 @@ treesitter = importlib.import_module("nvim-treesitter.update") -HEADER = ( - "# GENERATED by ./pkgs/applications/editors/vim/plugins/utils/update.py. Do not edit!" -) +HEADER = "# GENERATED by ./pkgs/applications/editors/vim/plugins/utils/update.py. Do not edit!" 
NIXPKGS_NVIMTREESITTER_FOLDER = "pkgs/applications/editors/vim/plugins/nvim-treesitter" @@ -47,7 +45,9 @@ class VimEditor(nixpkgs_plugin_update.Editor): nvim_treesitter_updated = False def generate_nix( - self, plugins: List[Tuple[PluginDesc, nixpkgs_plugin_update.Plugin]], outfile: str + self, + plugins: List[Tuple[PluginDesc, nixpkgs_plugin_update.Plugin]], + outfile: str, ): log.info("Generating nix code") log.debug("Loading nvim-treesitter revision from nix...") @@ -105,7 +105,9 @@ def plugin2nix( self, pdesc: PluginDesc, plugin: nixpkgs_plugin_update.Plugin, isNeovim: bool ) -> str: if isNeovim: - raise RuntimeError(f"Plugin {plugin.name} is already packaged in `luaPackages`, please use that") + raise RuntimeError( + f"Plugin {plugin.name} is already packaged in `luaPackages`, please use that" + ) repo = pdesc.repo content = f" {plugin.normalized_name} = " diff --git a/pkgs/applications/gis/qgis/test.py b/pkgs/applications/gis/qgis/test.py index 4f5772e30cea5..e266b11752677 100644 --- a/pkgs/applications/gis/qgis/test.py +++ b/pkgs/applications/gis/qgis/test.py @@ -11,8 +11,8 @@ test_interactive = eval(os.getenv("QGIS_TEST_INTERACTIVE", "False")) -def test(test_interactive=False): +def test(test_interactive=False): import osgeo # just to check if geo python modules are available from qgis.core import QgsVectorLayer, QgsFeature, QgsGeometry, QgsProject @@ -31,7 +31,7 @@ def test(test_interactive=False): ) """ - layer = QgsVectorLayer('Polygon?crs=epsg:3857', 'QGIS-on-Nix', 'memory') + layer = QgsVectorLayer("Polygon?crs=epsg:3857", "QGIS-on-Nix", "memory") provider = layer.dataProvider() polygon = QgsFeature() @@ -49,6 +49,7 @@ def test(test_interactive=False): QgsProject.instance().removeMapLayer(layer) QgsProject.instance().clear() + try: test(test_interactive=test_interactive) @@ -61,4 +62,3 @@ def test(test_interactive=False): print("QGIS test script has failed.") print("Error message: {}".format(e)) os._exit(1) - diff --git a/pkgs/applications/networking/browsers/chromium/depot_tools.py b/pkgs/applications/networking/browsers/chromium/depot_tools.py index ab8007a4e9107..c3b061394313b 100755 --- a/pkgs/applications/networking/browsers/chromium/depot_tools.py +++ b/pkgs/applications/networking/browsers/chromium/depot_tools.py @@ -8,6 +8,7 @@ a working depot_tools checkout and a ref to fetch and prints the result as JSON to stdout. 
""" + import base64 import json from typing import Optional @@ -43,7 +44,9 @@ def get_deps(self, repo_vars: dict, path: str) -> None: ) deps_file = self.get_file("DEPS") - evaluated = gclient_eval.Parse(deps_file, vars_override=repo_vars, filename="DEPS") + evaluated = gclient_eval.Parse( + deps_file, vars_override=repo_vars, filename="DEPS" + ) repo_vars = dict(evaluated.get("vars", {})) | repo_vars @@ -104,17 +107,30 @@ def repo_from_dep(dep: dict) -> Optional[Repo]: return None - -chromium = GitilesRepo("https://chromium.googlesource.com/chromium/src.git", chromium_version) +chromium = GitilesRepo( + "https://chromium.googlesource.com/chromium/src.git", chromium_version +) chromium.get_deps( { **{ - f"checkout_{platform}": platform == "linux" or platform == "x64" or platform == "arm64" or platform == "arm" - for platform in ["ios", "chromeos", "android", "mac", "win", "linux"] + f"checkout_{platform}": platform == "linux" + or platform == "x64" + or platform == "arm64" + or platform == "arm" + for platform in ["ios", "chromeos", "android", "mac", "win", "linux"] }, **{ - f"checkout_{arch}": True - for arch in ["x64", "arm64", "arm", "x86", "mips", "mips64", "ppc", "riscv64"] + f"checkout_{arch}": True + for arch in [ + "x64", + "arm64", + "arm", + "x86", + "mips", + "mips64", + "ppc", + "riscv64", + ] }, }, "", diff --git a/pkgs/applications/networking/browsers/chromium/get-commit-message.py b/pkgs/applications/networking/browsers/chromium/get-commit-message.py index 0a8ed0a60a49b..38e8ec08fbbb5 100755 --- a/pkgs/applications/networking/browsers/chromium/get-commit-message.py +++ b/pkgs/applications/networking/browsers/chromium/get-commit-message.py @@ -14,40 +14,46 @@ import requests # Official rss/atom feed taken from 's html source () -feed = feedparser.parse('https://www.blogger.com/feeds/8982037438137564684/posts/default') -html_tags = re.compile(r'<[^>]+>') +feed = feedparser.parse( + "https://www.blogger.com/feeds/8982037438137564684/posts/default" +) +html_tags = re.compile(r"<[^>]+>") target_version = sys.argv[1] if len(sys.argv) == 2 else None for entry in feed.entries: - url = requests.get(entry.link).url.split('?')[0] - if entry.title.lower() != 'Stable Channel Update for Desktop'.lower(): - if target_version and entry.title == '': + url = requests.get(entry.link).url.split("?")[0] + if entry.title.lower() != "Stable Channel Update for Desktop".lower(): + if target_version and entry.title == "": # Workaround for a special case (Chrome Releases bug?): - if not 'the-stable-channel-has-been-updated-to' in url: + if not "the-stable-channel-has-been-updated-to" in url: continue else: continue content = entry.content[0].value - content = html_tags.sub('', content) # Remove any HTML tags - if re.search(r'Linux', content) is None: + content = html_tags.sub("", content) # Remove any HTML tags + if re.search(r"Linux", content) is None: continue - #print(url) # For debugging purposes - version = re.search(r'\d+(\.\d+){3}', content).group(0) + # print(url) # For debugging purposes + version = re.search(r"\d+(\.\d+){3}", content).group(0) if target_version: if version != target_version: continue else: - print('chromium: TODO -> ' + version + '\n') + print("chromium: TODO -> " + version + "\n") print(url) - if fixes := re.search(r'This update includes .+ security fix(es)?\.', content): + if fixes := re.search(r"This update includes .+ security fix(es)?\.", content): fixes = fixes.group(0) - if zero_days := re.search(r'Google is aware( of reports)? 
th(e|at) .+ in the wild\.', content): + if zero_days := re.search( + r"Google is aware( of reports)? th(e|at) .+ in the wild\.", content + ): fixes += " " + zero_days.group(0) - print('\n' + '\n'.join(textwrap.wrap(fixes, width=72))) - if cve_list := re.findall(r'CVE-[^: ]+', content): - cve_list = list(OrderedDict.fromkeys(cve_list)) # Remove duplicates but preserve the order - cve_string = ' '.join(cve_list) - print("\nCVEs:\n" + '\n'.join(textwrap.wrap(cve_string, width=72))) + print("\n" + "\n".join(textwrap.wrap(fixes, width=72))) + if cve_list := re.findall(r"CVE-[^: ]+", content): + cve_list = list( + OrderedDict.fromkeys(cve_list) + ) # Remove duplicates but preserve the order + cve_string = " ".join(cve_list) + print("\nCVEs:\n" + "\n".join(textwrap.wrap(cve_string, width=72))) sys.exit(0) # We only care about the most recent stable channel update print("Error: No match.") diff --git a/pkgs/applications/networking/instant-messengers/discord/disable-breaking-updates.py b/pkgs/applications/networking/instant-messengers/discord/disable-breaking-updates.py index a7ffd8405ad64..466d3c0294b60 100644 --- a/pkgs/applications/networking/instant-messengers/discord/disable-breaking-updates.py +++ b/pkgs/applications/networking/instant-messengers/discord/disable-breaking-updates.py @@ -19,14 +19,19 @@ config_home = { "darwin": os.path.join(os.path.expanduser("~"), "Library", "Application Support"), - "linux": os.environ.get("XDG_CONFIG_HOME") or os.path.join(os.path.expanduser("~"), ".config") + "linux": os.environ.get("XDG_CONFIG_HOME") + or os.path.join(os.path.expanduser("~"), ".config"), }.get(sys.platform, None) if config_home is None: print("[Nix] Unsupported operating system.") sys.exit(1) -config_dir_name = "@configDirName@".replace(" ", "") if sys.platform == "darwin" else "@configDirName@" +config_dir_name = ( + "@configDirName@".replace(" ", "") + if sys.platform == "darwin" + else "@configDirName@" +) settings_path = Path(f"{config_home}/{config_dir_name}/settings.json") settings_path_temp = Path(f"{config_home}/{config_dir_name}/settings.json.tmp") diff --git a/pkgs/applications/office/libreoffice/generate-libreoffice-srcs.py b/pkgs/applications/office/libreoffice/generate-libreoffice-srcs.py index 7e2c13b5b17c9..0040d5bc76eff 100755 --- a/pkgs/applications/office/libreoffice/generate-libreoffice-srcs.py +++ b/pkgs/applications/office/libreoffice/generate-libreoffice-srcs.py @@ -8,69 +8,75 @@ todo - Ideally we would move as much as possible into derivation dependencies. 
""" + import collections, itertools, json, re, subprocess, sys, os import urllib.request, urllib.error -def main(): +def main(): packages = list(get_packages()) for x in packages: print(x, file=sys.stderr) - print('[') + print("[") for x in packages: - - md5 = x['md5'] - upstream_sha256 = x['sha256'] + md5 = x["md5"] + upstream_sha256 = x["sha256"] if upstream_sha256: hash = upstream_sha256 - hashtype = 'sha256' + hashtype = "sha256" else: hash = md5 - hashtype = 'md5' - tarball = x['tarball'] + hashtype = "md5" + tarball = x["tarball"] url = construct_url(x) - print('url: {}'.format(url), file=sys.stderr) + print("url: {}".format(url), file=sys.stderr) path = download(url, tarball, hash, hashtype) - print('path: {}'.format(path), file=sys.stderr) + print("path: {}".format(path), file=sys.stderr) sha256 = get_sha256(path) - print('sha256: {}'.format(sha256), file=sys.stderr) + print("sha256: {}".format(sha256), file=sys.stderr) - print(' {') + print(" {") print(' name = "{}";'.format(tarball)) print(' url = "{}";'.format(url)) print(' sha256 = "{}";'.format(sha256)) print(' md5 = "{}";'.format(md5)) - print(' md5name = "{}-{}";'.format(md5 or upstream_sha256,tarball)) - print(' }') + print(' md5name = "{}-{}";'.format(md5 or upstream_sha256, tarball)) + print(" }") - print(']') + print("]") def construct_url(x): - if x['brief']: - url = 'https://dev-www.libreoffice.org/src/{}{}'.format( - x.get('subdir', ''), x['tarball']) + if x["brief"]: + url = "https://dev-www.libreoffice.org/src/{}{}".format( + x.get("subdir", ""), x["tarball"] + ) else: - url = 'https://dev-www.libreoffice.org/src/{}{}-{}'.format( - x.get('subdir', ''), x['md5'], x['tarball']) + url = "https://dev-www.libreoffice.org/src/{}{}-{}".format( + x.get("subdir", ""), x["md5"], x["tarball"] + ) - if x['name'].startswith('FONT_NOTO_') and not probe_url(url): - return 'https://noto-website-2.storage.googleapis.com/pkgs/{}'.format(x['tarball']) + if x["name"].startswith("FONT_NOTO_") and not probe_url(url): + return "https://noto-website-2.storage.googleapis.com/pkgs/{}".format( + x["tarball"] + ) - if x['name'] == 'FONT_OPENDYSLEXIC': - return 'https://github.com/antijingoist/opendyslexic/releases/download/v0.91.12/{}'.format(x['tarball']) + if x["name"] == "FONT_OPENDYSLEXIC": + return "https://github.com/antijingoist/opendyslexic/releases/download/v0.91.12/{}".format( + x["tarball"] + ) return url def probe_url(url: str) -> bool: - request = urllib.request.Request(url, method='HEAD') + request = urllib.request.Request(url, method="HEAD") try: with urllib.request.urlopen(request) as response: return response.status == 200 @@ -79,18 +85,28 @@ def probe_url(url: str) -> bool: def download(url, name, hash, hashtype): - cmd = ['nix-prefetch-url', url, hash, '--print-path', - '--type', hashtype, '--name', name] - proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True, - universal_newlines=True) - return proc.stdout.split('\n')[1].strip() + cmd = [ + "nix-prefetch-url", + url, + hash, + "--print-path", + "--type", + hashtype, + "--name", + name, + ] + proc = subprocess.run( + cmd, stdout=subprocess.PIPE, check=True, universal_newlines=True + ) + return proc.stdout.split("\n")[1].strip() def get_sha256(path): - cmd = ['sha256sum', path] - proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True, - universal_newlines=True) - return proc.stdout.split(' ')[0].strip() + cmd = ["sha256sum", path] + proc = subprocess.run( + cmd, stdout=subprocess.PIPE, check=True, universal_newlines=True + ) + 
return proc.stdout.split(" ")[0].strip() def get_packages(): @@ -98,8 +114,7 @@ def get_packages(): All of the package data: What's parsed from download.lst, plus our additions. """ - return apply_additions(get_packages_from_download_list(), - get_additions()) + return apply_additions(get_packages_from_download_list(), get_additions()) def get_additions(): @@ -107,14 +122,13 @@ def get_additions(): A mapping from package name (the all-caps identifiers used in `download.lst`) to a dict of additional attributes to set on the package. """ - with open('./libreoffice-srcs-additions.json') as f: + with open("./libreoffice-srcs-additions.json") as f: return json.load(f) def apply_additions(xs, additions): for x in xs: - yield dict_merge([x, - additions.get(x['name'], {})]) + yield dict_merge([x, additions.get(x["name"], {})]) def get_packages_from_download_list(): @@ -125,34 +139,33 @@ def get_packages_from_download_list(): def lines(): for x in sub_symbols(parse_lines(get_lines())): - interpretation = interpret(x) - if interpretation == 'unrecognized': + if interpretation == "unrecognized": print_skipped_line(x) else: - yield dict_merge([x, - interpretation]) + yield dict_merge([x, interpretation]) def cluster(xs): """ Groups lines according to their order within the file, to support packages that are listed in `download.lst` more than once. """ - keys = ['tarball', 'md5', 'sha256', 'brief'] - a = {k: [x for x in xs if k in x['attrs']] for k in keys} + keys = ["tarball", "md5", "sha256", "brief"] + a = {k: [x for x in xs if k in x["attrs"]] for k in keys} return zip(*[a[k] for k in keys]) def packages(): - for (name, group) in groupby(lines(), lambda x: x['name']): + for name, group in groupby(lines(), lambda x: x["name"]): for xs in cluster(group): - yield {'name': name, - 'attrs': dict_merge(x['attrs'] for x in xs), - 'index': min(x['index'] for x in xs)} + yield { + "name": name, + "attrs": dict_merge(x["attrs"] for x in xs), + "index": min(x["index"] for x in xs), + } - for x in sorted(packages(), key=lambda x: x['index']): - yield dict_merge([{'name': x['name']}, - x['attrs']]) + for x in sorted(packages(), key=lambda x: x["index"]): + yield dict_merge([{"name": x["name"]}, x["attrs"]]) def dict_merge(xs): @@ -168,24 +181,20 @@ def groupby(xs, f): >>> groupby([1, 2, 3, 4], lambda x: x % 2) [(0, [2, 4]), (1, [1, 3])] """ - for (k, iter) in itertools.groupby(sorted(xs, key=f), f): + for k, iter in itertools.groupby(sorted(xs, key=f), f): group = list(iter) yield (f(group[0]), group) def get_lines(): - - download_list = os.getenv('downloadList') + download_list = os.getenv("downloadList") with open(download_list) as f: return f.read().splitlines() def print_skipped_line(x): - - print('Skipped line {}: {}'.format(x['index'], - x['original']), - file=sys.stderr) + print("Skipped line {}: {}".format(x["index"], x["original"]), file=sys.stderr) def parse_lines(lines): @@ -193,19 +202,17 @@ def parse_lines(lines): Input: List of strings (the lines from `download.lst` Output: Iterator of dicts with keys 'key', 'value', and 'index' """ - for (index, line) in enumerate(lines): - - x = { 'index': index, 'original': line } + for index, line in enumerate(lines): + x = {"index": index, "original": line} result = parse_line(line) - if result == 'nothing': + if result == "nothing": pass - elif result == 'unrecognized': + elif result == "unrecognized": print_skipped_line(x) else: - yield dict_merge([x, - result]) + yield dict_merge([x, result]) def parse_line(line): @@ -216,18 +223,15 @@ def 
parse_line(line):
       2. 'unrecognized' (if parsing failed)
     """
 
-    if re.match('\s*(#.*)?$', line):
-        return 'nothing'
+    if re.match("\s*(#.*)?$", line):
+        return "nothing"
 
-    match = re.match('([^:\s]+)\s*:=\s*(.*)$', line)
+    match = re.match("([^:\s]+)\s*:=\s*(.*)$", line)
 
     if match:
-        return {
-            'key': match.group(1),
-            'value': match.group(2).strip()
-        }
+        return {"key": match.group(1), "value": match.group(2).strip()}
     else:
-        return 'unrecognized'
+        return "unrecognized"
 
 
 def sub_symbols(xs):
@@ -241,15 +245,14 @@ def sub_symbols(xs):
 
     xs = list(xs)
 
-    symbols = {x['key']: x for x in xs}
+    symbols = {x["key"]: x for x in xs}
 
     def get_value(k):
         x = symbols.get(k)
-        return x['value'] if x is not None else ''
+        return x["value"] if x is not None else ""
 
     for x in xs:
-        yield dict_merge([{'value': sub_str(x['value'], get_value)},
-                          x])
+        yield dict_merge([{"value": sub_str(x["value"], get_value)}, x])
 
 
 def sub_str(string, func):
@@ -265,7 +268,7 @@ def func2(m):
         result = func(x)
         return result if result is not None else x
 
-    return re.sub(r'\$\(([^\$\(\)]+)\)', func2, string)
+    return re.sub(r"\$\(([^\$\(\)]+)\)", func2, string)
 
 
 def interpret(x):
@@ -274,12 +277,18 @@ def interpret(x):
     Output: One of
       1. Dict with keys 'name' and 'attrs'
       2. 'unrecognized' (if interpretation failed)
     """
-    for f in [interpret_md5, interpret_sha256, interpret_tarball_with_md5, interpret_tarball, interpret_jar]:
+    for f in [
+        interpret_md5,
+        interpret_sha256,
+        interpret_tarball_with_md5,
+        interpret_tarball,
+        interpret_jar,
+    ]:
         result = f(x)
         if result is not None:
             return result
 
-    return 'unrecognized'
+    return "unrecognized"
 
 
 def interpret_md5(x):
@@ -288,18 +297,18 @@ def interpret_md5(x):
     {'name': 'ODFGEN', 'attrs': {'md5': '32572ea48d9021bbd6fa317ddb697abc'}}
     """
-    match = re.match('^(.*)_MD5SUM$', x['key'])
+    match = re.match("^(.*)_MD5SUM$", x["key"])
 
     if match:
-        return {'name': match.group(1),
-                'attrs': {'md5': x['value'], 'sha256': ''}}
+        return {"name": match.group(1), "attrs": {"md5": x["value"], "sha256": ""}}
+
 
 def interpret_sha256(x):
-    match = re.match('^(.*)_SHA256SUM$', x['key'])
+    match = re.match("^(.*)_SHA256SUM$", x["key"])
 
     if match:
-        return {'name': match.group(1),
-                'attrs': {'sha256': x['value'], 'md5': ''}}
+        return {"name": match.group(1), "attrs": {"sha256": x["value"], "md5": ""}}
+
 
 def interpret_tarball(x):
     """
@@ -308,18 +317,17 @@ def interpret_tarball(x):
        'attrs': {'tarball': 'libfreehand-0.1.1.tar.bz2', 'brief': True}}
     """
-    match = re.match('^(.*)_TARBALL$', x['key'])
+    match = re.match("^(.*)_TARBALL$", x["key"])
 
     if match:
-        return {'name': match.group(1),
-                'attrs': {'tarball': x['value'], 'brief': True}}
+        return {"name": match.group(1), "attrs": {"tarball": x["value"], "brief": True}}
+
 
 def interpret_jar(x):
-    match = re.match('^(.*)_JAR$', x['key'])
+    match = re.match("^(.*)_JAR$", x["key"])
 
     if match:
-        return {'name': match.group(1),
-                'attrs': {'tarball': x['value'], 'brief': True}}
+        return {"name": match.group(1), "attrs": {"tarball": x["value"], "brief": True}}
 
 
 def interpret_tarball_with_md5(x):
@@ -331,16 +339,21 @@ def interpret_tarball_with_md5(x):
                 'md5': '48d647fbd8ef8889e5a7f422c1bfda94',
                 'brief': False}}
     """
-    match = {'key': re.match('^(.*)_(TARBALL|JAR)$', x['key']),
-             'value': re.match('(?P<md5>[0-9a-fA-F]{32})-(?P<tarball>.+)$',
-                               x['value'])}
+    match = {
+        "key": re.match("^(.*)_(TARBALL|JAR)$", x["key"]),
+        "value": re.match("(?P<md5>[0-9a-fA-F]{32})-(?P<tarball>.+)$", x["value"]),
+    }
 
-    if match['key'] and match['value']:
-        return {'name': match['key'].group(1),
-                'attrs': {'tarball': 
match['value'].group('tarball'), - 'md5': match['value'].group('md5'), - 'sha256': '', - 'brief': False}} + if match["key"] and match["value"]: + return { + "name": match["key"].group(1), + "attrs": { + "tarball": match["value"].group("tarball"), + "md5": match["value"].group("md5"), + "sha256": "", + "brief": False, + }, + } main() diff --git a/pkgs/build-support/dart/build-dart-application/workspace-package-config.py b/pkgs/build-support/dart/build-dart-application/workspace-package-config.py index 7d7d00988f9ac..34ebfe7dcd794 100644 --- a/pkgs/build-support/dart/build-dart-application/workspace-package-config.py +++ b/pkgs/build-support/dart/build-dart-application/workspace-package-config.py @@ -28,12 +28,14 @@ def main() -> None: if not any( pkg["name"] == package_pubspec["name"] for pkg in package_config["packages"] ): - package_config["packages"].append({ - "name": package_pubspec["name"], - "rootUri": Path(package_path).resolve().as_uri(), - "packageUri": "lib/", - "languageVersion": languageVersion, - }) + package_config["packages"].append( + { + "name": package_pubspec["name"], + "rootUri": Path(package_path).resolve().as_uri(), + "packageUri": "lib/", + "languageVersion": languageVersion, + } + ) with Path(".dart_tool/package_config.json").open("w", encoding="utf-8") as f: json.dump(package_config, f, sort_keys=True, indent=4) diff --git a/pkgs/build-support/dlang/dub-to-nix/dub-to-nix.py b/pkgs/build-support/dlang/dub-to-nix/dub-to-nix.py index 879acc5f4acb6..febb4565f398a 100755 --- a/pkgs/build-support/dlang/dub-to-nix/dub-to-nix.py +++ b/pkgs/build-support/dlang/dub-to-nix/dub-to-nix.py @@ -12,7 +12,9 @@ def eprint(text: str): if not os.path.exists("dub.selections.json"): - eprint("The file `dub.selections.json` does not exist in the current working directory") + eprint( + "The file `dub.selections.json` does not exist in the current working directory" + ) eprint("run `dub upgrade` to generate it") sys.exit(1) @@ -22,10 +24,19 @@ def eprint(text: str): depsDict: dict = selectionsJson["versions"] # For each dependency expand non-expanded version into a dict with a "version" key -depsDict = {pname: (versionOrDepDict if isinstance(versionOrDepDict, dict) else {"version": versionOrDepDict}) for (pname, versionOrDepDict) in depsDict.items()} +depsDict = { + pname: ( + versionOrDepDict + if isinstance(versionOrDepDict, dict) + else {"version": versionOrDepDict} + ) + for (pname, versionOrDepDict) in depsDict.items() +} # Don't process path-type selections -depsDict = {pname: depDict for (pname, depDict) in depsDict.items() if "path" not in depDict} +depsDict = { + pname: depDict for (pname, depDict) in depsDict.items() if "path" not in depDict +} # Pre-validate selections before trying to fetch for pname in depsDict: @@ -35,7 +46,9 @@ def eprint(text: str): eprint(f'Expected version of "{pname}" to be non-branch type') eprint(f'Found: "{version}"') eprint("Please specify a non-branch version inside `dub.selections.json`") - eprint("When packaging, you might also need to patch the version value in the appropriate places (`dub.selections.json`, dub.sdl`, `dub.json`)") + eprint( + "When packaging, you might also need to patch the version value in the appropriate places (`dub.selections.json`, dub.sdl`, `dub.json`)" + ) sys.exit(1) if "repository" in depDict: repository = depDict["repository"] @@ -43,8 +56,14 @@ def eprint(text: str): eprint(f'Expected repository field of "{pname}" to begin with "git+"') eprint(f'Found: "{repository}"') sys.exit(1) - if (len(version) < 7 or 
len(version) > 40 or not all(c in string.hexdigits for c in version)): - eprint(f'Expected version field of "{pname}" to begin be a valid git revision') + if ( + len(version) < 7 + or len(version) > 40 + or not all(c in string.hexdigits for c in version) + ): + eprint( + f'Expected version field of "{pname}" to begin be a valid git revision' + ) eprint(f'Found: "{version}"') sys.exit(1) @@ -58,14 +77,30 @@ def eprint(text: str): strippedRepo = repository[4:] eprint(f"Fetching {pname}@{version} ({strippedRepo})") command = ["nix-prefetch-git", strippedRepo, version] - rawRes = subprocess.run(command, check=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout + rawRes = subprocess.run( + command, + check=True, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + ).stdout sha256 = json.loads(rawRes)["sha256"] - lockedDepsDict[pname] = {"version": version, "repository": strippedRepo, "sha256": sha256} + lockedDepsDict[pname] = { + "version": version, + "repository": strippedRepo, + "sha256": sha256, + } else: eprint(f"Fetching {pname}@{version}") url = f"https://code.dlang.org/packages/{pname}/{version}.zip" command = ["nix-prefetch-url", "--type", "sha256", url] - sha256 = subprocess.run(command, check=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.rstrip() + sha256 = subprocess.run( + command, + check=True, + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + ).stdout.rstrip() lockedDepsDict[pname] = {"version": version, "sha256": sha256} print(json.dumps({"dependencies": lockedDepsDict}, indent=2)) diff --git a/pkgs/build-support/docker/auto-layer.py b/pkgs/build-support/docker/auto-layer.py index 747ea9276e642..1bb29c0a1e014 100644 --- a/pkgs/build-support/docker/auto-layer.py +++ b/pkgs/build-support/docker/auto-layer.py @@ -50,13 +50,16 @@ import os import sys + def layer_count(layer_split): return len(set(layer_split.values())) + def path_key(path): - hash, name = path.split('-', 1) + hash, name = path.split("-", 1) return name, hash + def closure(*todo, key): """ Find all dependencies of the arguments including the arguments themselves. @@ -70,12 +73,14 @@ def closure(*todo, key): todo.update(key(x)) return done + def dependencies(*todo, key): """ Find all dependencies of the arguments excluding the arguments themselves. """ return closure(*todo, key=key) - set(todo) + def minimal_cover(paths, key): """ The minimal set of paths that together cover all input paths with their @@ -85,6 +90,7 @@ def minimal_cover(paths, key): paths_deps = set.union(*(dependencies(d, key=key) for d in paths)) return paths - paths_deps + def auto_layer(graph, ignore_paths, layer_limit): # Compute all direct users of each path nodes = {x["path"]: x | {"users": set()} for x in graph} @@ -145,19 +151,20 @@ def layer_info(layer_id): nonlocal nodes nonlocal layer_split # The full set of paths in this layer is all the paths that were assigned to it. 
- paths = {path - for path, layer_id_2 in layer_split.items() - if layer_id == layer_id_2} + paths = { + path for path, layer_id_2 in layer_split.items() if layer_id == layer_id_2 + } layerSize = sum(nodes[path]["narSize"] for path in paths) return { "usedBy": sorted(layer_id, key=path_key), "paths": sorted(paths, key=path_key), "layerSize": layerSize, - "closureSize": sum(nodes[path]["narSize"] for path in closure(*paths, key=node_deps)), + "closureSize": sum( + nodes[path]["narSize"] for path in closure(*paths, key=node_deps) + ), } - layers = {layer_id: layer_info(layer_id) - for layer_id in set(layer_split.values())} + layers = {layer_id: layer_info(layer_id) for layer_id in set(layer_split.values())} # The layer order doesn't actually matter for docker but it's still kind of neat to have layers come # after all of their dependencies. The easiest way to do that is to order by closure size since a @@ -173,22 +180,22 @@ def layer_info(layer_id): assert total_layer_size == total_nar_size, (total_layer_size, total_nar_size) # Format as a list of layers, each defined as a list of store paths. - return [[path - for path in layer["paths"] - if path not in ignore_paths] - for layer in layer_order - if set(layer["paths"]) - ignore_paths] + return [ + [path for path in layer["paths"] if path not in ignore_paths] + for layer in layer_order + if set(layer["paths"]) - ignore_paths + ] + -if __name__ == '__main__': +if __name__ == "__main__": import argparse parser = argparse.ArgumentParser( - prog='auto-layer', - description='Split store paths into docker layers.' + prog="auto-layer", description="Split store paths into docker layers." ) - parser.add_argument('graph_file') - parser.add_argument('ignore_file', default="/dev/null") - parser.add_argument('layer_limit', type=int, default=100) + parser.add_argument("graph_file") + parser.add_argument("ignore_file", default="/dev/null") + parser.add_argument("layer_limit", type=int, default=100) args = parser.parse_args() with open(args.graph_file) as f: diff --git a/pkgs/build-support/docker/detjson.py b/pkgs/build-support/docker/detjson.py index fe82cbea11bbf..cae16a7c2fa79 100644 --- a/pkgs/build-support/docker/detjson.py +++ b/pkgs/build-support/docker/detjson.py @@ -4,8 +4,9 @@ # Deterministic layer json: https://github.com/docker/hub-feedback/issues/488 import sys + reload(sys) -sys.setdefaultencoding('UTF8') +sys.setdefaultencoding("UTF8") import json # If any of the keys below are equal to a certain value @@ -17,13 +18,14 @@ "MacAddress": "", "NetworkDisabled": False, "PortSpecs": None, - "VolumeDriver": "" - } + "VolumeDriver": "", + }, } SAFEDELS["container_config"] = SAFEDELS["config"] + def makedet(j, safedels): - for k,v in safedels.items(): + for k, v in safedels.items(): if k not in j: continue if type(v) == dict: @@ -31,10 +33,12 @@ def makedet(j, safedels): elif j[k] == v: del j[k] + def main(): j = json.load(sys.stdin) makedet(j, SAFEDELS) json.dump(j, sys.stdout, sort_keys=True) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/pkgs/build-support/docker/stream_layered_image.py b/pkgs/build-support/docker/stream_layered_image.py index 0078c1cb764ea..25ff4b3a95648 100644 --- a/pkgs/build-support/docker/stream_layered_image.py +++ b/pkgs/build-support/docker/stream_layered_image.py @@ -225,9 +225,9 @@ def add_layer_dir(tar, paths, store_dir, mtime, uid, gid, uname, gname): """ invalid_paths = [i for i in paths if not i.startswith(store_dir)] - assert ( - len(invalid_paths) == 0 - ), f"Expecting 
absolute paths from {store_dir}, but got: {invalid_paths}" + assert len(invalid_paths) == 0, ( + f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}" + ) # First, calculate the tarball checksum and the size. extract_checksum = ExtractChecksum() @@ -343,8 +343,10 @@ def main(): """, ) arg_parser.add_argument( - "--repo_tag", "-t", type=str, - help="Override the RepoTags from the configuration" + "--repo_tag", + "-t", + type=str, + help="Override the RepoTags from the configuration", ) args = arg_parser.parse_args() @@ -386,9 +388,7 @@ def main(): file=sys.stderr, ) layers.append( - add_customisation_layer( - tar, conf["customisation_layer"], mtime=mtime - ) + add_customisation_layer(tar, conf["customisation_layer"], mtime=mtime) ) print("Adding manifests...", file=sys.stderr) diff --git a/pkgs/build-support/dotnet/make-nuget-source/extract-licenses-from-nupkgs.py b/pkgs/build-support/dotnet/make-nuget-source/extract-licenses-from-nupkgs.py index 22564b0bb2bc8..b7bd55b6d843a 100644 --- a/pkgs/build-support/dotnet/make-nuget-source/extract-licenses-from-nupkgs.py +++ b/pkgs/build-support/dotnet/make-nuget-source/extract-licenses-from-nupkgs.py @@ -21,7 +21,9 @@ nupkg_dir = Path(sys.argv[1]) for nupkg_name in glob("*.nupkg", root_dir=nupkg_dir): with zipfile.ZipFile(nupkg_dir / nupkg_name) as nupkg: - for nuspec_name in [name for name in nupkg.namelist() if name.endswith(".nuspec")]: + for nuspec_name in [ + name for name in nupkg.namelist() if name.endswith(".nuspec") + ]: with nupkg.open(nuspec_name) as nuspec_stream: nuspec = ET.parse(nuspec_stream) licenses = nuspec.findall(".//{*}license[@type='expression']") diff --git a/pkgs/build-support/fetchpypilegacy/fetch-legacy.py b/pkgs/build-support/fetchpypilegacy/fetch-legacy.py index e031f244a7714..784036a0aa1b2 100644 --- a/pkgs/build-support/fetchpypilegacy/fetch-legacy.py +++ b/pkgs/build-support/fetchpypilegacy/fetch-legacy.py @@ -69,12 +69,16 @@ def try_fetch(url: str, package_name: str, package_filename: str) -> None: parsed_url = urlparse(index_url) username = parsed_url.username or username password = parsed_url.password or password - index_url = parsed_url._replace(netloc=parsed_url.netloc.rpartition("@")[-1]).geturl() + index_url = parsed_url._replace( + netloc=parsed_url.netloc.rpartition("@")[-1] + ).geturl() req = urllib.request.Request(index_url) if username and password: # Add authentication - password_b64 = base64.b64encode(":".join((username, password)).encode()).decode("utf-8") + password_b64 = base64.b64encode(":".join((username, password)).encode()).decode( + "utf-8" + ) req.add_header("Authorization", "Basic {}".format(password_b64)) else: # If we are not using authentication disable TLS verification for long term reproducibility context.check_hostname = False @@ -86,7 +90,10 @@ def try_fetch(url: str, package_name: str, package_filename: str) -> None: parser = Pep503() parser.feed(str(index, "utf-8")) if package_filename not in parser.sources: - print("The file %s has not be found in the index %s" % (package_filename, index_url)) + print( + "The file %s has not be found in the index %s" + % (package_filename, index_url) + ) exit(1) package_file = open(package_filename, "wb") diff --git a/pkgs/build-support/references-by-popularity/closure-graph.py b/pkgs/build-support/references-by-popularity/closure-graph.py index 4f8efd42ed816..9abb0d49343a5 100644 --- a/pkgs/build-support/references-by-popularity/closure-graph.py +++ b/pkgs/build-support/references-by-popularity/closure-graph.py @@ -120,76 +120,60 @@ 
def debug(msg, *args, **kwargs): if False: - print( - "DEBUG: {}".format( - msg.format(*args, **kwargs) - ), - file=sys.stderr - ) + print("DEBUG: {}".format(msg.format(*args, **kwargs)), file=sys.stderr) # Find paths in the original dataset which are never referenced by # any other paths def find_roots(closures): - roots = []; - + roots = [] for closure in closures: - path = closure['path'] + path = closure["path"] if not any_refer_to(path, closures): roots.append(path) return roots + class TestFindRoots(unittest.TestCase): def test_find_roots(self): self.assertCountEqual( - find_roots([ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] - }, - { - "path": "/nix/store/bar", - "references": [ - "/nix/store/bar", - "/nix/store/tux" - ] - }, - { - "path": "/nix/store/hello", - "references": [ - ] - } - ]), - ["/nix/store/foo", "/nix/store/hello"] + find_roots( + [ + { + "path": "/nix/store/foo", + "references": ["/nix/store/foo", "/nix/store/bar"], + }, + { + "path": "/nix/store/bar", + "references": ["/nix/store/bar", "/nix/store/tux"], + }, + {"path": "/nix/store/hello", "references": []}, + ] + ), + ["/nix/store/foo", "/nix/store/hello"], ) def any_refer_to(path, closures): for closure in closures: - if path != closure['path']: - if path in closure['references']: + if path != closure["path"]: + if path in closure["references"]: return True return False + class TestAnyReferTo(unittest.TestCase): def test_has_references(self): self.assertTrue( any_refer_to( "/nix/store/bar", [ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/bar" - ] - }, - ] + {"path": "/nix/store/foo", "references": ["/nix/store/bar"]}, + ], ), ) + def test_no_references(self): self.assertFalse( any_refer_to( @@ -197,20 +181,18 @@ def test_no_references(self): [ { "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] + "references": ["/nix/store/foo", "/nix/store/bar"], }, - ] + ], ), ) + def all_paths(closures): paths = [] for closure in closures: - paths.append(closure['path']) - paths.extend(closure['references']) + paths.append(closure["path"]) + paths.extend(closure["references"]) paths.sort() return list(set(paths)) @@ -218,29 +200,27 @@ def all_paths(closures): class TestAllPaths(unittest.TestCase): def test_returns_all_paths(self): self.assertCountEqual( - all_paths([ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] - }, - { - "path": "/nix/store/bar", - "references": [ - "/nix/store/bar", - "/nix/store/tux" - ] - }, - { - "path": "/nix/store/hello", - "references": [ - ] - } - ]), - ["/nix/store/foo", "/nix/store/bar", "/nix/store/hello", "/nix/store/tux",] + all_paths( + [ + { + "path": "/nix/store/foo", + "references": ["/nix/store/foo", "/nix/store/bar"], + }, + { + "path": "/nix/store/bar", + "references": ["/nix/store/bar", "/nix/store/tux"], + }, + {"path": "/nix/store/hello", "references": []}, + ] + ), + [ + "/nix/store/foo", + "/nix/store/bar", + "/nix/store/hello", + "/nix/store/tux", + ], ) + def test_no_references(self): self.assertFalse( any_refer_to( @@ -248,15 +228,13 @@ def test_no_references(self): [ { "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] + "references": ["/nix/store/foo", "/nix/store/bar"], }, - ] + ], ), ) + # Convert: # # [ @@ -280,42 +258,38 @@ def make_lookup(closures): for closure in closures: # paths often self-refer - nonreferential_paths = [ref for ref in closure['references'] if ref != closure['path']] - 
lookup[closure['path']] = nonreferential_paths + nonreferential_paths = [ + ref for ref in closure["references"] if ref != closure["path"] + ] + lookup[closure["path"]] = nonreferential_paths return lookup + class TestMakeLookup(unittest.TestCase): def test_returns_lookp(self): self.assertDictEqual( - make_lookup([ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] - }, - { - "path": "/nix/store/bar", - "references": [ - "/nix/store/bar", - "/nix/store/tux" - ] - }, - { - "path": "/nix/store/hello", - "references": [ - ] - } - ]), + make_lookup( + [ + { + "path": "/nix/store/foo", + "references": ["/nix/store/foo", "/nix/store/bar"], + }, + { + "path": "/nix/store/bar", + "references": ["/nix/store/bar", "/nix/store/tux"], + }, + {"path": "/nix/store/hello", "references": []}, + ] + ), { - "/nix/store/foo": [ "/nix/store/bar" ], - "/nix/store/bar": [ "/nix/store/tux" ], - "/nix/store/hello": [ ], - } + "/nix/store/foo": ["/nix/store/bar"], + "/nix/store/bar": ["/nix/store/tux"], + "/nix/store/hello": [], + }, ) + # Convert: # # /nix/store/foo with @@ -339,6 +313,8 @@ def test_returns_lookp(self): # } # } subgraphs_cache = {} + + def make_graph_segment_from_root(root, lookup): global subgraphs_cache children = {} @@ -357,31 +333,36 @@ def make_graph_segment_from_root(root, lookup): children[ref] = subgraphs_cache[ref] return children + class TestMakeGraphSegmentFromRoot(unittest.TestCase): def test_returns_graph(self): self.assertDictEqual( - make_graph_segment_from_root("/nix/store/foo", { - "/nix/store/foo": [ "/nix/store/bar" ], - "/nix/store/bar": [ "/nix/store/tux" ], - "/nix/store/tux": [ ], - "/nix/store/hello": [ ], - }), - { - "/nix/store/bar": { - "/nix/store/tux": {} - } - } + make_graph_segment_from_root( + "/nix/store/foo", + { + "/nix/store/foo": ["/nix/store/bar"], + "/nix/store/bar": ["/nix/store/tux"], + "/nix/store/tux": [], + "/nix/store/hello": [], + }, + ), + {"/nix/store/bar": {"/nix/store/tux": {}}}, ) + def test_returns_graph_tiny(self): self.assertDictEqual( - make_graph_segment_from_root("/nix/store/tux", { - "/nix/store/foo": [ "/nix/store/bar" ], - "/nix/store/bar": [ "/nix/store/tux" ], - "/nix/store/tux": [ ], - }), - {} + make_graph_segment_from_root( + "/nix/store/tux", + { + "/nix/store/foo": ["/nix/store/bar"], + "/nix/store/bar": ["/nix/store/tux"], + "/nix/store/tux": [], + }, + ), + {}, ) + # Convert a graph segment in to a popularity-counted dictionary: # # From: @@ -406,6 +387,8 @@ def test_returns_graph_tiny(self): # /nix/store/tux: 6 # ] popularity_cache = {} + + def graph_popularity_contest(full_graph): global popularity_cache popularity = defaultdict(int) @@ -430,29 +413,27 @@ def graph_popularity_contest(full_graph): return popularity + class TestGraphPopularityContest(unittest.TestCase): def test_counts_popularity(self): self.assertDictEqual( - graph_popularity_contest({ - "/nix/store/foo": { - "/nix/store/bar": { - "/nix/store/baz": { - "/nix/store/tux": {} - } - }, - "/nix/store/baz": { - "/nix/store/tux": {} + graph_popularity_contest( + { + "/nix/store/foo": { + "/nix/store/bar": {"/nix/store/baz": {"/nix/store/tux": {}}}, + "/nix/store/baz": {"/nix/store/tux": {}}, } } - }), + ), { - "/nix/store/foo": 1, - "/nix/store/bar": 2, - "/nix/store/baz": 4, - "/nix/store/tux": 6, - } + "/nix/store/foo": 1, + "/nix/store/bar": 2, + "/nix/store/baz": 4, + "/nix/store/tux": 6, + }, ) + # Emit a list of packages by popularity, most first: # # From: @@ -487,27 +468,26 @@ def order_by_popularity(paths): class 
TestOrderByPopularity(unittest.TestCase): def test_returns_in_order(self): self.assertEqual( - order_by_popularity({ - "/nix/store/foo": 1, - "/nix/store/bar": 1, - "/nix/store/baz": 2, - "/nix/store/tux": 2, - }), - [ - "/nix/store/baz", - "/nix/store/tux", - "/nix/store/bar", - "/nix/store/foo" - ] + order_by_popularity( + { + "/nix/store/foo": 1, + "/nix/store/bar": 1, + "/nix/store/baz": 2, + "/nix/store/tux": 2, + } + ), + ["/nix/store/baz", "/nix/store/tux", "/nix/store/bar", "/nix/store/foo"], ) + def package_name(path): - parts = path.split('-') + parts = path.split("-") start = parts.pop(0) # don't throw away any data, so the order is always the same. # even in cases where only the hash at the start has changed. parts.append(start) - return '-'.join(parts) + return "-".join(parts) + def main(): filename = sys.argv[1] @@ -538,7 +518,7 @@ def main(): graph = data[key] debug("Finding roots from {}", key) - roots = find_roots(graph); + roots = find_roots(graph) debug("Making lookup for {}", key) lookup = make_lookup(graph) @@ -560,8 +540,9 @@ def main(): ordered.extend(missing) print("\n".join(ordered)) + if "--test" in sys.argv: # Don't pass --test otherwise unittest gets mad - unittest.main(argv = [f for f in sys.argv if f != "--test" ]) + unittest.main(argv=[f for f in sys.argv if f != "--test"]) else: main() diff --git a/pkgs/build-support/replace-secret/replace-secret.py b/pkgs/build-support/replace-secret/replace-secret.py index 30ff41d491baa..e94f617f452d7 100755 --- a/pkgs/build-support/replace-secret/replace-secret.py +++ b/pkgs/build-support/replace-secret/replace-secret.py @@ -11,15 +11,14 @@ """ parser = argparse.ArgumentParser( - description=description, - formatter_class=RawDescriptionHelpFormatter + description=description, formatter_class=RawDescriptionHelpFormatter ) parser.add_argument("string_to_replace", help="the string to replace") parser.add_argument("secret_file", help="the file containing the secret") parser.add_argument("file", help="the file to perform the replacement on") args = parser.parse_args() -with open(args.secret_file) as sf, open(args.file, 'r+') as f: +with open(args.secret_file) as sf, open(args.file, "r+") as f: old = f.read() secret = sf.read().strip("\n") new_content = old.replace(args.string_to_replace, secret) diff --git a/pkgs/build-support/rust/fetch-cargo-vendor-util.py b/pkgs/build-support/rust/fetch-cargo-vendor-util.py index 183b587c42170..51b3c8ec1e41c 100644 --- a/pkgs/build-support/rust/fetch-cargo-vendor-util.py +++ b/pkgs/build-support/rust/fetch-cargo-vendor-util.py @@ -33,22 +33,22 @@ def get_lockfile_version(cargo_lock_toml: dict[str, Any]) -> int: def create_http_session() -> requests.Session: - retries = Retry( - total=5, - backoff_factor=0.5, - status_forcelist=[500, 502, 503, 504] - ) + retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504]) session = requests.Session() - session.mount('http://', HTTPAdapter(max_retries=retries)) - session.mount('https://', HTTPAdapter(max_retries=retries)) + session.mount("http://", HTTPAdapter(max_retries=retries)) + session.mount("https://", HTTPAdapter(max_retries=retries)) return session -def download_file_with_checksum(session: requests.Session, url: str, destination_path: Path) -> str: +def download_file_with_checksum( + session: requests.Session, url: str, destination_path: Path +) -> str: sha256_hash = hashlib.sha256() with session.get(url, stream=True) as response: if not response.ok: - raise Exception(f"Failed to fetch file from {url}. 
Status code: {response.status_code}")
+            raise Exception(
+                f"Failed to fetch file from {url}. Status code: {response.status_code}"
+            )
         with open(destination_path, "wb") as file:
             for chunk in response.iter_content(1024):  # Download in chunks
                 if chunk:  # Filter out keep-alive chunks
@@ -67,13 +67,14 @@ def get_download_url_for_tarball(pkg: dict[str, Any]) -> str:
     if pkg["source"] != "registry+https://github.com/rust-lang/crates.io-index":
         raise Exception("Only the default crates.io registry is supported.")
 
-    return f"https://crates.io/api/v1/crates/{pkg["name"]}/{pkg["version"]}/download"
+    return f"https://crates.io/api/v1/crates/{pkg['name']}/{pkg['version']}/download"
 
 
-def download_tarball(session: requests.Session, pkg: dict[str, Any], out_dir: Path) -> None:
-
+def download_tarball(
+    session: requests.Session, pkg: dict[str, Any], out_dir: Path
+) -> None:
     url = get_download_url_for_tarball(pkg)
-    filename = f"{pkg["name"]}-{pkg["version"]}.tar.gz"
+    filename = f"{pkg['name']}-{pkg['version']}.tar.gz"
 
     # TODO: allow legacy checksum specification, see importCargoLock for example
     # also, don't forget about the other usage of the checksum
@@ -85,19 +86,33 @@ def download_tarball(session: requests.Session, pkg: dict[str, Any], out_dir: Pa
     calculated_checksum = download_file_with_checksum(session, url, tarball_out_dir)
 
     if calculated_checksum != expected_checksum:
-        raise Exception(f"Hash mismatch! File fetched from {url} had checksum {calculated_checksum}, expected {expected_checksum}.")
+        raise Exception(
+            f"Hash mismatch! File fetched from {url} had checksum {calculated_checksum}, expected {expected_checksum}."
+        )
 
 
 def download_git_tree(url: str, git_sha_rev: str, out_dir: Path) -> None:
-
     tree_out_dir = out_dir / "git" / git_sha_rev
     eprint(f"Fetching {url}#{git_sha_rev} -> git/{git_sha_rev}")
-    cmd = ["nix-prefetch-git", "--builder", "--quiet", "--fetch-submodules", "--url", url, "--rev", git_sha_rev, "--out", str(tree_out_dir)]
+    cmd = [
+        "nix-prefetch-git",
+        "--builder",
+        "--quiet",
+        "--fetch-submodules",
+        "--url",
+        url,
+        "--rev",
+        git_sha_rev,
+        "--out",
+        str(tree_out_dir),
+    ]
     subprocess.check_output(cmd)
 
 
-GIT_SOURCE_REGEX = re.compile("git\\+(?P<url>[^?]+)(\\?(?P<type>rev|tag|branch)=(?P<value>.*))?#(?P<git_sha_rev>.*)")
+GIT_SOURCE_REGEX = re.compile(
+    "git\\+(?P<url>[^?]+)(\\?(?P<type>rev|tag|branch)=(?P<value>.*))?#(?P<git_sha_rev>.*)"
+)
 
 
 class GitSourceInfo(TypedDict):
@@ -132,7 +147,7 @@ def create_vendor_staging(lockfile_path: Path, out_dir: Path) -> None:
     for pkg in cargo_lock_toml["package"]:
         # ignore local dependenices
         if "source" not in pkg.keys():
-            eprint(f"Skipping local dependency: {pkg["name"]}")
+            eprint(f"Skipping local dependency: {pkg['name']}")
             continue
         source = pkg["source"]
 
@@ -167,12 +182,22 @@ def create_vendor_staging(lockfile_path: Path, out_dir: Path) -> None:
 
 
 def get_manifest_metadata(manifest_path: Path) -> dict[str, Any]:
-    cmd = ["cargo", "metadata", "--format-version", "1", "--no-deps", "--manifest-path", str(manifest_path)]
+    cmd = [
+        "cargo",
+        "metadata",
+        "--format-version",
+        "1",
+        "--no-deps",
+        "--manifest-path",
+        str(manifest_path),
+    ]
     output = subprocess.check_output(cmd)
     return json.loads(output)
 
 
-def try_get_crate_manifest_path_from_mainfest_path(manifest_path: Path, crate_name: str) -> Path | None:
+def try_get_crate_manifest_path_from_mainfest_path(
+    manifest_path: Path, crate_name: str
+) -> Path | None:
     metadata = get_manifest_metadata(manifest_path)
 
     for pkg in metadata["packages"]:
@@ -194,8 +219,9 @@ def find_crate_manifest_in_tree(tree: Path, crate_name: str) -> 
Path: raise Exception(f"Couldn't find manifest for crate {crate_name} inside {tree}.") -def copy_and_patch_git_crate_subtree(git_tree: Path, crate_name: str, crate_out_dir: Path) -> None: - +def copy_and_patch_git_crate_subtree( + git_tree: Path, crate_name: str, crate_out_dir: Path +) -> None: # This function will get called by copytree to decide which entries of a directory should be copied # We'll copy everything except symlinks that are invalid def ignore_func(dir_str: str, path_strs: list[str]) -> list[str]: @@ -220,7 +246,9 @@ def ignore_func(dir_str: str, path_strs: list[str]) -> list[str]: # This can be useful if the nix build sandbox is turned off and there is a symlink to a common absolute path if not target_path.is_relative_to(git_tree): ignorelist.append(path_str) - eprint(f"Symlink points outside of the crate's base git tree, ignoring: {path} -> {target_path}") + eprint( + f"Symlink points outside of the crate's base git tree, ignoring: {path} -> {target_path}" + ) continue return ignorelist @@ -252,7 +280,14 @@ def ignore_func(dir_str: str, path_strs: list[str]) -> list[str]: def extract_crate_tarball_contents(tarball_path: Path, crate_out_dir: Path) -> None: eprint(f"Unpacking to {crate_out_dir}") crate_out_dir.mkdir() - cmd = ["tar", "xf", str(tarball_path), "-C", str(crate_out_dir), "--strip-components=1"] + cmd = [ + "tar", + "xf", + str(tarball_path), + "-C", + str(crate_out_dir), + "--strip-components=1", + ] subprocess.check_output(cmd) @@ -265,26 +300,24 @@ def create_vendor(vendor_staging_dir: Path, out_dir: Path) -> None: lockfile_version = get_lockfile_version(cargo_lock_toml) config_lines = [ - '[source.vendored-sources]', + "[source.vendored-sources]", 'directory = "@vendor@"', - '[source.crates-io]', + "[source.crates-io]", 'replace-with = "vendored-sources"', ] seen_source_keys = set() for pkg in cargo_lock_toml["package"]: - # ignore local dependenices if "source" not in pkg.keys(): continue source: str = pkg["source"] - dir_name = f"{pkg["name"]}-{pkg["version"]}" + dir_name = f"{pkg['name']}-{pkg['version']}" crate_out_dir = out_dir / dir_name if source.startswith("git+"): - source_info = parse_git_source(pkg["source"], lockfile_version) git_sha_rev = source_info["git_sha_rev"] @@ -296,7 +329,7 @@ def create_vendor(vendor_staging_dir: Path, out_dir: Path) -> None: with open(crate_out_dir / ".cargo-checksum.json", "w") as f: json.dump({"files": {}}, f) - source_key = source[0:source.find("#")] + source_key = source[0 : source.find("#")] if source_key in seen_source_keys: continue @@ -310,8 +343,7 @@ def create_vendor(vendor_staging_dir: Path, out_dir: Path) -> None: config_lines.append('replace-with = "vendored-sources"') elif source.startswith("registry+"): - - filename = f"{pkg["name"]}-{pkg["version"]}.tar.gz" + filename = f"{pkg['name']}-{pkg['version']}.tar.gz" tarball_path = vendor_staging_dir / "tarballs" / filename extract_crate_tarball_contents(tarball_path, crate_out_dir) @@ -332,14 +364,20 @@ def main() -> None: subcommand = sys.argv[1] subcommand_func_dict = { - "create-vendor-staging": lambda: create_vendor_staging(lockfile_path=Path(sys.argv[2]), out_dir=Path(sys.argv[3])), - "create-vendor": lambda: create_vendor(vendor_staging_dir=Path(sys.argv[2]), out_dir=Path(sys.argv[3])) + "create-vendor-staging": lambda: create_vendor_staging( + lockfile_path=Path(sys.argv[2]), out_dir=Path(sys.argv[3]) + ), + "create-vendor": lambda: create_vendor( + vendor_staging_dir=Path(sys.argv[2]), out_dir=Path(sys.argv[3]) + ), } subcommand_func = 
subcommand_func_dict.get(subcommand) if subcommand_func is None: - raise Exception(f"Unknown subcommand: '{subcommand}'. Must be one of {list(subcommand_func_dict.keys())}") + raise Exception( + f"Unknown subcommand: '{subcommand}'. Must be one of {list(subcommand_func_dict.keys())}" + ) subcommand_func() diff --git a/pkgs/by-name/_1/_1password-gui/update-sources.py b/pkgs/by-name/_1/_1password-gui/update-sources.py index bdf257cb16646..45cc085252435 100755 --- a/pkgs/by-name/_1/_1password-gui/update-sources.py +++ b/pkgs/by-name/_1/_1password-gui/update-sources.py @@ -11,6 +11,7 @@ DOWNLOADS_BASE_URL = "https://downloads.1password.com" OP_PGP_KEY_URL = "https://downloads.1password.com/linux/keys/1password.asc" + class Sources(OrderedDict): def __init__(self): with open("sources.json", "r") as fp: @@ -32,7 +33,7 @@ def __init__(self): return self.sq = shutil.which("sq") - if (self.sq is None): + if self.sq is None: raise SystemExit(f"sequoia sq not found") self.signer_file, _ = nix_store_prefetch(OP_PGP_KEY_URL) @@ -45,7 +46,7 @@ def verify(self, sig, tarball): "verify", f"--signer-file={self.signer_file}", f"--signature-file={sig}", - tarball + tarball, ] try: diff --git a/pkgs/by-name/an/anyk/patch_paths.py b/pkgs/by-name/an/anyk/patch_paths.py index 5e2306bd3c9a8..302c2a824a246 100644 --- a/pkgs/by-name/an/anyk/patch_paths.py +++ b/pkgs/by-name/an/anyk/patch_paths.py @@ -2,10 +2,12 @@ from struct import pack import sys + def to_java_string(string) -> bytes: - string_bytes = string.encode("utf-8") - # Java constant pool string entries are prefixed by 0x01 and 16-bit big-endian string length. - return pack(">BH", 1, len(string_bytes)) + string_bytes + string_bytes = string.encode("utf-8") + # Java constant pool string entries are prefixed by 0x01 and 16-bit big-endian string length. + return pack(">BH", 1, len(string_bytes)) + string_bytes + class_file = Path(sys.argv[1]) @@ -13,23 +15,37 @@ def to_java_string(string) -> bytes: # We want to fix these package names so they work with the open-source Java EE releases instead of OpenJDK 8. 
patches = [ - ( "com/sun/xml/internal/ws/developer/WSBindingProvider", "com/sun/xml/ws/developer/WSBindingProvider" ), - ( "com/sun/xml/internal/ws/api/message/Header", "com/sun/xml/ws/api/message/Header" ), - ( "com.sun.xml.internal.ws.transport.http.client.streaming.chunk.size", "com.sun.xml.ws.transport.http.client.streaming.chunk.size" ), - ( "com/sun/xml/internal/ws/api/message/Headers", "com/sun/xml/ws/api/message/Headers" ), - ( "(Lorg/w3c/dom/Element;)Lcom/sun/xml/internal/ws/api/message/Header;", "(Lorg/w3c/dom/Element;)Lcom/sun/xml/ws/api/message/Header;" ), - ( "([Lcom/sun/xml/internal/ws/api/message/Header;)V", "([Lcom/sun/xml/ws/api/message/Header;)V" ), + ( + "com/sun/xml/internal/ws/developer/WSBindingProvider", + "com/sun/xml/ws/developer/WSBindingProvider", + ), + ("com/sun/xml/internal/ws/api/message/Header", "com/sun/xml/ws/api/message/Header"), + ( + "com.sun.xml.internal.ws.transport.http.client.streaming.chunk.size", + "com.sun.xml.ws.transport.http.client.streaming.chunk.size", + ), + ( + "com/sun/xml/internal/ws/api/message/Headers", + "com/sun/xml/ws/api/message/Headers", + ), + ( + "(Lorg/w3c/dom/Element;)Lcom/sun/xml/internal/ws/api/message/Header;", + "(Lorg/w3c/dom/Element;)Lcom/sun/xml/ws/api/message/Header;", + ), + ( + "([Lcom/sun/xml/internal/ws/api/message/Header;)V", + "([Lcom/sun/xml/ws/api/message/Header;)V", + ), ] for old, new in patches: - old_java = to_java_string(old) - new_java = to_java_string(new) - assert old_java in clazz - clazz = clazz.replace(old_java, new_java) - assert old_java not in clazz - assert new_java in clazz + old_java = to_java_string(old) + new_java = to_java_string(new) + assert old_java in clazz + clazz = clazz.replace(old_java, new_java) + assert old_java not in clazz + assert new_java in clazz assert b".internal." not in clazz class_file.write_bytes(clazz) - diff --git a/pkgs/by-name/an/anyk/update.py b/pkgs/by-name/an/anyk/update.py index 665e9fa56a216..a9783009a9753 100755 --- a/pkgs/by-name/an/anyk/update.py +++ b/pkgs/by-name/an/anyk/update.py @@ -9,12 +9,17 @@ # NAV doesn't provide stable versioned URLs so we put the download link in Wayback Machine to preserve it. 
print("Archiving...") -save_api = waybackpy.WaybackMachineSaveAPI("https://nav.gov.hu/pfile/programFile?path=/nyomtatvanyok/letoltesek/nyomtatvanykitolto_programok/nyomtatvany_apeh/keretprogramok/AbevJava") +save_api = waybackpy.WaybackMachineSaveAPI( + "https://nav.gov.hu/pfile/programFile?path=/nyomtatvanyok/letoltesek/nyomtatvanykitolto_programok/nyomtatvany_apeh/keretprogramok/AbevJava" +) url = save_api.save() print("Prefetching...") -sha256, unpack_path = subprocess.check_output(["nix-prefetch-url", "--unpack", "--print-path", "--name", "abevjava", url], universal_newlines=True).split("\n")[:2] +sha256, unpack_path = subprocess.check_output( + ["nix-prefetch-url", "--unpack", "--print-path", "--name", "abevjava", url], + universal_newlines=True, +).split("\n")[:2] print("Extracting version...") manifest = (Path(unpack_path) / "META-INF" / "MANIFEST.MF").read_text() @@ -22,8 +27,14 @@ version = re.search("Implementation-Version: (.+)", manifest).group(1) print("Writing version.json...") -(Path(__file__).parent / "version.json").write_text(json.dumps({ - "url": url, - "sha256": sha256, - "version": version, -}, indent=2) + "\n") +(Path(__file__).parent / "version.json").write_text( + json.dumps( + { + "url": url, + "sha256": sha256, + "version": version, + }, + indent=2, + ) + + "\n" +) diff --git a/pkgs/by-name/ap/apache-airflow/update-providers.py b/pkgs/by-name/ap/apache-airflow/update-providers.py index 207c381192586..24beaa238dfe9 100755 --- a/pkgs/by-name/ap/apache-airflow/update-providers.py +++ b/pkgs/by-name/ap/apache-airflow/update-providers.py @@ -88,9 +88,7 @@ def name_to_attr_path(req: str, packages: Dict[str, Dict[str, str]]) -> Optional # python(minor).(major)-(pname)-(version or unstable-date) # we need the version qualifier, or we'll have multiple matches # (e.g. pyserial and pyserial-asyncio when looking for pyserial) - pattern = re.compile( - f"^python\\d+\\.\\d+-{name}-(?:\\d|unstable-.*)", re.I - ) + pattern = re.compile(f"^python\\d+\\.\\d+-{name}-(?:\\d|unstable-.*)", re.I) for attr_path, package in packages.items(): # logging.debug("Checking match for %s with %s", name, package["name"]) if pattern.match(package["name"]): diff --git a/pkgs/by-name/au/auto-patchelf/source/auto-patchelf.py b/pkgs/by-name/au/auto-patchelf/source/auto-patchelf.py index bf8882818dd97..4ab724ecd5279 100644 --- a/pkgs/by-name/au/auto-patchelf/source/auto-patchelf.py +++ b/pkgs/by-name/au/auto-patchelf/source/auto-patchelf.py @@ -26,14 +26,13 @@ @contextmanager def open_elf(path: Path) -> Iterator[ELFFile]: - with path.open('rb') as stream: + with path.open("rb") as stream: yield ELFFile(stream) def is_static_executable(elf: ELFFile) -> bool: # Statically linked executables have an ELF type of EXEC but no INTERP. - return (elf.header["e_type"] == 'ET_EXEC' - and not elf.get_section_by_name(".interp")) + return elf.header["e_type"] == "ET_EXEC" and not elf.get_section_by_name(".interp") def is_dynamic_executable(elf: ELFFile) -> bool: @@ -42,12 +41,17 @@ def is_dynamic_executable(elf: ELFFile) -> bool: # section but their ELF type is DYN. 
return bool(elf.get_section_by_name(".interp")) + def is_separate_debug_object(elf: ELFFile) -> bool: # objects created by separateDebugInfo = true have all the section headers # of the unstripped objects but those that normal `strip` would have kept # are NOBITS text_section = elf.get_section_by_name(".text") - return elf.has_dwarf_info() and bool(text_section) and text_section.header['sh_type'] == "SHT_NOBITS" + return ( + elf.has_dwarf_info() + and bool(text_section) + and text_section.header["sh_type"] == "SHT_NOBITS" + ) def get_dependencies(elf: ELFFile) -> list[list[Path]]: @@ -57,9 +61,9 @@ def get_dependencies(elf: ELFFile) -> list[list[Path]]: # instance of DynamicSection, but that is required to call iter_tags for section in elf.iter_sections(): if isinstance(section, DynamicSection): - for tag in section.iter_tags('DT_NEEDED'): + for tag in section.iter_tags("DT_NEEDED"): dependencies.append([Path(tag.needed)]) - break # There is only one dynamic section + break # There is only one dynamic section return dependencies @@ -91,13 +95,13 @@ def get_rpath(elf: ELFFile) -> list[str]: # instance of DynamicSection, but that is required to call iter_tags for section in elf.iter_sections(): if isinstance(section, DynamicSection): - for tag in section.iter_tags('DT_RUNPATH'): - return tag.runpath.split(':') + for tag in section.iter_tags("DT_RUNPATH"): + return tag.runpath.split(":") - for tag in section.iter_tags('DT_RPATH'): - return tag.rpath.split(':') + for tag in section.iter_tags("DT_RPATH"): + return tag.rpath.split(":") - break # There is only one dynamic section + break # There is only one dynamic section return [] @@ -128,13 +132,13 @@ def osabi_are_compatible(wanted: str, got: str) -> bool: # compatibility into SHT_NOTE sections like .note.tag and # .note.ABI-tag. It would be prudent to add these to the detection # logic to produce better ABI information. - if wanted == 'ELFOSABI_SYSV': + if wanted == "ELFOSABI_SYSV": return True # Similarly here, we should be able to link against a superset of # features, so even if the target has another ABI, this should be # fine. - if got == 'ELFOSABI_SYSV': + if got == "ELFOSABI_SYSV": return True # Otherwise, we simply return whether the ABIs are identical. 
@@ -156,7 +160,7 @@ def glob(path: Path, pattern: str, recursive: bool) -> Iterator[Path]: soname_cache: DefaultDict[tuple[str, str], list[tuple[Path, str]]] = defaultdict(list) -def populate_cache(initial: list[Path], recursive: bool =False) -> None: +def populate_cache(initial: list[Path], recursive: bool = False) -> None: lib_dirs = list(initial) while lib_dirs: @@ -182,13 +186,16 @@ def populate_cache(initial: list[Path], recursive: bool =False) -> None: try: with open_elf(path) as elf: if is_separate_debug_object(elf): - print(f"skipping {path} because it looks like a separate debug object") + print( + f"skipping {path} because it looks like a separate debug object" + ) continue osabi = get_osabi(elf) arch = get_arch(elf) - rpath = [Path(p) for p in get_rpath(elf) - if p and '$ORIGIN' not in p] + rpath = [ + Path(p) for p in get_rpath(elf) if p and "$ORIGIN" not in p + ] lib_dirs += rpath soname_cache[(path.name, arch)].append((resolved.parent, osabi)) @@ -206,15 +213,20 @@ def find_dependency(soname: str, soarch: str, soabi: str) -> Optional[Path]: @dataclass class Dependency: - file: Path # The file that contains the dependency - name: Path # The name of the dependency - found: bool = False # Whether it was found somewhere - - -def auto_patchelf_file(path: Path, runtime_deps: list[Path], append_rpaths: list[Path] = [], keep_libc: bool = False, extra_args: list[str] = []) -> list[Dependency]: + file: Path # The file that contains the dependency + name: Path # The name of the dependency + found: bool = False # Whether it was found somewhere + + +def auto_patchelf_file( + path: Path, + runtime_deps: list[Path], + append_rpaths: list[Path] = [], + keep_libc: bool = False, + extra_args: list[str] = [], +) -> list[Dependency]: try: with open_elf(path) as elf: - if is_static_executable(elf): # No point patching these print(f"skipping {path} because it is statically linked") @@ -229,14 +241,18 @@ def auto_patchelf_file(path: Path, runtime_deps: list[Path], append_rpaths: list if interpreter_arch != file_arch: # Our target architecture is different than this file's # architecture, so skip it. - print(f"skipping {path} because its architecture ({file_arch})" - f" differs from target ({interpreter_arch})") + print( + f"skipping {path} because its architecture ({file_arch})" + f" differs from target ({interpreter_arch})" + ) return [] file_osabi = get_osabi(elf) if not osabi_are_compatible(interpreter_osabi, file_osabi): - print(f"skipping {path} because its OS ABI ({file_osabi}) is" - f" not compatible with target ({interpreter_osabi})") + print( + f"skipping {path} because its OS ABI ({file_osabi}) is" + f" not compatible with target ({interpreter_osabi})" + ) return [] file_is_dynamic_executable = is_dynamic_executable(elf) @@ -248,14 +264,21 @@ def auto_patchelf_file(path: Path, runtime_deps: list[Path], append_rpaths: list # these platforms are packaged in nixpkgs with ld.so in a separate derivation # than libc.so and friends. keep_libc is mandatory. 
- keep_libc |= file_osabi in ('ELFOSABI_FREEBSD', 'ELFOSABI_OPENBSD') + keep_libc |= file_osabi in ("ELFOSABI_FREEBSD", "ELFOSABI_OPENBSD") rpath = [] if file_is_dynamic_executable: print("setting interpreter of", path) subprocess.run( - ["patchelf", "--set-interpreter", interpreter_path.as_posix(), path.as_posix()] + extra_args, - check=True) + [ + "patchelf", + "--set-interpreter", + interpreter_path.as_posix(), + path.as_posix(), + ] + + extra_args, + check=True, + ) rpath += runtime_deps print("searching for dependencies of", path) @@ -266,7 +289,6 @@ def auto_patchelf_file(path: Path, runtime_deps: list[Path], append_rpaths: list for dep in file_dependencies: was_found = False for candidate in dep: - # This loop determines which candidate for a given # dependency can be found, and how. There may be multiple # candidates for a dep because of '.note.dlopen' @@ -298,7 +320,9 @@ def auto_patchelf_file(path: Path, runtime_deps: list[Path], append_rpaths: list elif is_libc and not keep_libc: was_found = True break - elif found_dependency := find_dependency(candidate.name, file_arch, file_osabi): + elif found_dependency := find_dependency( + candidate.name, file_arch, file_osabi + ): rpath.append(found_dependency) dependencies.append(Dependency(path, candidate, found=True)) print(f" {candidate} -> found: {found_dependency}") @@ -321,23 +345,24 @@ def auto_patchelf_file(path: Path, runtime_deps: list[Path], append_rpaths: list if rpath: print("setting RPATH to:", rpath_str) subprocess.run( - ["patchelf", "--set-rpath", rpath_str, path.as_posix()] + extra_args, - check=True) + ["patchelf", "--set-rpath", rpath_str, path.as_posix()] + extra_args, + check=True, + ) return dependencies def auto_patchelf( - paths_to_patch: list[Path], - lib_dirs: list[Path], - runtime_deps: list[Path], - recursive: bool = True, - ignore_missing: list[str] = [], - append_rpaths: list[Path] = [], - keep_libc: bool = False, - add_existing: bool = True, - extra_args: list[str] = []) -> None: - + paths_to_patch: list[Path], + lib_dirs: list[Path], + runtime_deps: list[Path], + recursive: bool = True, + ignore_missing: list[str] = [], + append_rpaths: list[Path] = [], + keep_libc: bool = False, + add_existing: bool = True, + extra_args: list[str] = [], +) -> None: if not paths_to_patch: sys.exit("No paths to patch, stopping.") @@ -349,9 +374,11 @@ def auto_patchelf( populate_cache(lib_dirs) dependencies = [] - for path in chain.from_iterable(glob(p, '*', recursive) for p in paths_to_patch): + for path in chain.from_iterable(glob(p, "*", recursive) for p in paths_to_patch): if not path.is_symlink() and path.is_file(): - dependencies += auto_patchelf_file(path, runtime_deps, append_rpaths, keep_libc, extra_args) + dependencies += auto_patchelf_file( + path, runtime_deps, append_rpaths, keep_libc, extra_args + ) missing = [dep for dep in dependencies if not dep.found] @@ -361,36 +388,43 @@ def auto_patchelf( for dep in missing: for pattern in ignore_missing: if fnmatch(dep.name.name, pattern): - print(f"warn: auto-patchelf ignoring missing {dep.name} wanted by {dep.file}") + print( + f"warn: auto-patchelf ignoring missing {dep.name} wanted by {dep.file}" + ) break else: - print(f"error: auto-patchelf could not satisfy dependency {dep.name} wanted by {dep.file}") + print( + f"error: auto-patchelf could not satisfy dependency {dep.name} wanted by {dep.file}" + ) failure = True if failure: - sys.exit('auto-patchelf failed to find all the required dependencies.\n' - 'Add the missing dependencies to --libs or use ' - 
'`--ignore-missing="foo.so.1 bar.so etc.so"`.') + sys.exit( + "auto-patchelf failed to find all the required dependencies.\n" + "Add the missing dependencies to --libs or use " + '`--ignore-missing="foo.so.1 bar.so etc.so"`.' + ) def main() -> None: parser = argparse.ArgumentParser( prog="auto-patchelf", - description='auto-patchelf tries as hard as possible to patch the' - ' provided binary files by looking for compatible' - ' libraries in the provided paths.') + description="auto-patchelf tries as hard as possible to patch the" + " provided binary files by looking for compatible" + " libraries in the provided paths.", + ) parser.add_argument( "--ignore-missing", nargs="*", type=str, default=[], - help="Do not fail when some dependencies are not found." + help="Do not fail when some dependencies are not found.", ) parser.add_argument( "--no-recurse", dest="recursive", action="store_false", - help="Disable the recursive traversal of paths to patch." + help="Disable the recursive traversal of paths to patch.", ) parser.add_argument( "--paths", @@ -398,8 +432,8 @@ def main() -> None: type=Path, required=True, help="Paths whose content needs to be patched." - " Single files and directories are accepted." - " Directories are traversed recursively by default." + " Single files and directories are accepted." + " Directories are traversed recursively by default.", ) parser.add_argument( "--libs", @@ -407,8 +441,8 @@ def main() -> None: type=Path, default=[], help="Paths where libraries are searched for." - " Single files and directories are accepted." - " Directories are not searched recursively." + " Single files and directories are accepted." + " Directories are not searched recursively.", ) parser.add_argument( "--runtime-dependencies", @@ -416,7 +450,7 @@ def main() -> None: type=Path, default=[], help="Paths to prepend to the runtime path of executable binaries." - " Subject to deduplication, which may imply some reordering." 
+ " Subject to deduplication, which may imply some reordering.", ) parser.add_argument( "--append-rpaths", @@ -461,18 +495,19 @@ def main() -> None: append_rpaths=args.append_rpaths, keep_libc=args.keep_libc, add_existing=args.add_existing, - extra_args=args.extra_args) + extra_args=args.extra_args, + ) -interpreter_path: Path = None # type: ignore -interpreter_osabi: str = None # type: ignore -interpreter_arch: str = None # type: ignore -libc_lib: Path = None # type: ignore +interpreter_path: Path = None # type: ignore +interpreter_osabi: str = None # type: ignore +interpreter_arch: str = None # type: ignore +libc_lib: Path = None # type: ignore if __name__ == "__main__": - nix_support = Path(os.environ.get('NIX_BINTOOLS', DEFAULT_BINTOOLS)) / 'nix-support' - interpreter_path = Path((nix_support / 'dynamic-linker').read_text().strip()) - libc_lib = Path((nix_support / 'orig-libc').read_text().strip()) / 'lib' + nix_support = Path(os.environ.get("NIX_BINTOOLS", DEFAULT_BINTOOLS)) / "nix-support" + interpreter_path = Path((nix_support / "dynamic-linker").read_text().strip()) + libc_lib = Path((nix_support / "orig-libc").read_text().strip()) / "lib" with open_elf(interpreter_path) as interpreter: interpreter_osabi = get_osabi(interpreter) diff --git a/pkgs/by-name/ba/backgroundremover/test-script.py b/pkgs/by-name/ba/backgroundremover/test-script.py index 29af6fd76e80b..817afdbdaba88 100644 --- a/pkgs/by-name/ba/backgroundremover/test-script.py +++ b/pkgs/by-name/ba/backgroundremover/test-script.py @@ -6,15 +6,15 @@ parser = ArgumentParser() -parser.add_argument('input', type=Path) -parser.add_argument('output', type=Path) +parser.add_argument("input", type=Path) +parser.add_argument("output", type=Path) args = parser.parse_args() input_bytes = args.input.read_bytes() output_bytes = bg.remove( - input_bytes, + input_bytes, ) args.output.write_bytes(output_bytes) diff --git a/pkgs/by-name/ca/calamares-nixos-extensions/src/modules/nixos/main.py b/pkgs/by-name/ca/calamares-nixos-extensions/src/modules/nixos/main.py index 261a03cc9a8ca..1967584aff7ee 100644 --- a/pkgs/by-name/ca/calamares-nixos-extensions/src/modules/nixos/main.py +++ b/pkgs/by-name/ca/calamares-nixos-extensions/src/modules/nixos/main.py @@ -316,26 +316,30 @@ boot.kernelPackages = pkgs.linuxPackages_latest; """ + + def env_is_set(name): envValue = os.environ.get(name) return not (envValue is None or envValue == "") + def generateProxyStrings(): proxyEnv = [] - if env_is_set('http_proxy'): - proxyEnv.append('http_proxy={}'.format(os.environ.get('http_proxy'))) - if env_is_set('https_proxy'): - proxyEnv.append('https_proxy={}'.format(os.environ.get('https_proxy'))) - if env_is_set('HTTP_PROXY'): - proxyEnv.append('HTTP_PROXY={}'.format(os.environ.get('HTTP_PROXY'))) - if env_is_set('HTTPS_PROXY'): - proxyEnv.append('HTTPS_PROXY={}'.format(os.environ.get('HTTPS_PROXY'))) + if env_is_set("http_proxy"): + proxyEnv.append("http_proxy={}".format(os.environ.get("http_proxy"))) + if env_is_set("https_proxy"): + proxyEnv.append("https_proxy={}".format(os.environ.get("https_proxy"))) + if env_is_set("HTTP_PROXY"): + proxyEnv.append("HTTP_PROXY={}".format(os.environ.get("HTTP_PROXY"))) + if env_is_set("HTTPS_PROXY"): + proxyEnv.append("HTTPS_PROXY={}".format(os.environ.get("HTTPS_PROXY"))) if len(proxyEnv) > 0: proxyEnv.insert(0, "env") return proxyEnv + def pretty_name(): return _("Installing NixOS.") @@ -368,7 +372,7 @@ def run(): libcalamares.job.setprogress(0.1) ngc_cfg = configparser.ConfigParser() - ngc_cfg["Defaults"] = { "Kernel": 
"lts" } + ngc_cfg["Defaults"] = {"Kernel": "lts"} ngc_cfg.read("/etc/nixos-generate-config.conf") # Create initial config file @@ -790,7 +794,7 @@ def run(): libcalamares.job.setprogress(0.3) # build nixos-install command - nixosInstallCmd = [ "pkexec" ] + nixosInstallCmd = ["pkexec"] nixosInstallCmd.extend(generateProxyStrings()) nixosInstallCmd.extend( [ @@ -814,9 +818,7 @@ def run(): try: output = "" proc = subprocess.Popen( - nixosInstallCmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT + nixosInstallCmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) while True: line = proc.stdout.readline().decode("utf-8") diff --git a/pkgs/by-name/dp/dprint/plugins/update-plugins.py b/pkgs/by-name/dp/dprint/plugins/update-plugins.py index 9c1e5c91883ca..b00140e447f8c 100755 --- a/pkgs/by-name/dp/dprint/plugins/update-plugins.py +++ b/pkgs/by-name/dp/dprint/plugins/update-plugins.py @@ -93,7 +93,7 @@ def get_update_url(plugin_url): def write_plugin_derivation(drv_attrs): drv = f"{{ mkDprintPlugin }}: mkDprintPlugin {json_to_nix(drv_attrs)}" - filepath = SCRIPT_DIR / f"{drv_attrs["pname"]}.nix" + filepath = SCRIPT_DIR / f"{drv_attrs['pname']}.nix" with open(filepath, "w+", encoding="utf8") as f: f.write(drv) nixfmt(filepath) diff --git a/pkgs/by-name/en/enpass/update_script.py b/pkgs/by-name/en/enpass/update_script.py index ab0b6ce3f48ed..00a98a7794cf6 100755 --- a/pkgs/by-name/en/enpass/update_script.py +++ b/pkgs/by-name/en/enpass/update_script.py @@ -21,8 +21,9 @@ with open(DATA_JSON, "r") as versions_file: versions = json.load(versions_file) + def find_latest_version(arch): - CHECK_URL = f'https://apt.enpass.io/dists/stable/main/binary-{arch}/Packages.gz' + CHECK_URL = f"https://apt.enpass.io/dists/stable/main/binary-{arch}/Packages.gz" packages = gzip.decompress(requests.get(CHECK_URL).content).decode() # Loop every package to find the newest one! 
@@ -32,32 +33,37 @@ def find_latest_version(arch): last_version = version.parse("0") for package in packages.split("\n\n"): matches = version_selector.search(package) - matched_version = matches.group('version') if matches and matches.group('version') else "0" + matched_version = ( + matches.group("version") if matches and matches.group("version") else "0" + ) parsed_version = version.parse(matched_version) if parsed_version > last_version: - path = path_selector.search(package).group('path') - sha256 = hash_selector.search(package).group('sha256') + path = path_selector.search(package).group("path") + sha256 = hash_selector.search(package).group("sha256") last_version = parsed_version return {"path": path, "sha256": sha256, "version": matched_version} + for arch in versions.keys(): - current_version = versions[arch]['version'] + current_version = versions[arch]["version"] logging.info(f"Current Version for {arch} is {current_version}") new_version = find_latest_version(arch) - if not new_version or new_version['version'] == current_version: + if not new_version or new_version["version"] == current_version: continue last_current_version = current_version last_new_version = new_version - logging.info(f"Update found ({arch}): enpass: {current_version} -> {new_version['version']}") - versions[arch]['path'] = new_version['path'] - versions[arch]['sha256'] = new_version['sha256'] - versions[arch]['version'] = new_version['version'] + logging.info( + f"Update found ({arch}): enpass: {current_version} -> {new_version['version']}" + ) + versions[arch]["path"] = new_version["path"] + versions[arch]["sha256"] = new_version["sha256"] + versions[arch]["version"] = new_version["version"] if not last_new_version: - logging.info('#### No update found ####') + logging.info("#### No update found ####") sys.exit(0) # write new versions back @@ -68,7 +74,7 @@ def find_latest_version(arch): # Commit the result: logging.info("Committing changes...") commit_message = f"enpass: {last_current_version} -> {last_new_version['version']}" -subprocess.run(['git', 'add', DATA_JSON], check=True) -subprocess.run(['git', 'commit', '--file=-'], input=commit_message.encode(), check=True) +subprocess.run(["git", "add", DATA_JSON], check=True) +subprocess.run(["git", "commit", "--file=-"], input=commit_message.encode(), check=True) logging.info("Done.") diff --git a/pkgs/by-name/fe/fermyon-spin/update.py b/pkgs/by-name/fe/fermyon-spin/update.py index 2a2129a5926ed..3e4cae740ef30 100755 --- a/pkgs/by-name/fe/fermyon-spin/update.py +++ b/pkgs/by-name/fe/fermyon-spin/update.py @@ -6,50 +6,60 @@ # Outer keys are as in Nix, the inner dict's values are as in upstream. # We set oldHash and newHash fields in the inner dict later. systems = { - "x86_64-linux": {"os": "linux", "arch": "amd64"}, - "x86_64-darwin": {"os": "macos", "arch": "amd64"}, - "aarch64-linux": {"os": "linux", "arch": "aarch64"}, - "aarch64-darwin": {"os": "macos", "arch": "aarch64"}, + "x86_64-linux": {"os": "linux", "arch": "amd64"}, + "x86_64-darwin": {"os": "macos", "arch": "amd64"}, + "aarch64-linux": {"os": "linux", "arch": "aarch64"}, + "aarch64-darwin": {"os": "macos", "arch": "aarch64"}, } # This will set the version correctly, # and will also set the hash for one of the systems. 
-subprocess.run([ - "nix-update", - "fermyon-spin", - "--version-regex", - r"^v([\d\.]*)", - "--url", - "https://github.com/spinframework/spin" -]) +subprocess.run( + [ + "nix-update", + "fermyon-spin", + "--version-regex", + r"^v([\d\.]*)", + "--url", + "https://github.com/spinframework/spin", + ] +) newVer = subprocess.run( - ["nix-instantiate", "--eval", "--raw", "-A", "fermyon-spin.version"], capture_output=True, encoding="locale" + ["nix-instantiate", "--eval", "--raw", "-A", "fermyon-spin.version"], + capture_output=True, + encoding="locale", ).stdout for nixTuple in systems: - url = ( - "https://github.com/spinframework/spin/releases/download/v" - f"{newVer}/spin-v{newVer}-{systems[nixTuple]['os']}-{systems[nixTuple]['arch']}.tar.gz" - ) + url = ( + "https://github.com/spinframework/spin/releases/download/v" + f"{newVer}/spin-v{newVer}-{systems[nixTuple]['os']}-{systems[nixTuple]['arch']}.tar.gz" + ) - systems[nixTuple]["oldHash"] = subprocess.run( - ["nix-instantiate", "--eval", "--raw", "-A", f"fermyon-spin.passthru.packageHashes.{nixTuple}"], - capture_output=True, - encoding="locale", - ).stdout + systems[nixTuple]["oldHash"] = subprocess.run( + [ + "nix-instantiate", + "--eval", + "--raw", + "-A", + f"fermyon-spin.passthru.packageHashes.{nixTuple}", + ], + capture_output=True, + encoding="locale", + ).stdout - systems[nixTuple]["newHash"] = subprocess.run( - ["bash", "-c", f"nix store prefetch-file {url} --json | jq -r '.hash'"], - capture_output=True, - encoding="locale", - ).stdout.strip() # Has a newline + systems[nixTuple]["newHash"] = subprocess.run( + ["bash", "-c", f"nix store prefetch-file {url} --json | jq -r '.hash'"], + capture_output=True, + encoding="locale", + ).stdout.strip() # Has a newline with open(join(dirname(__file__), "package.nix"), "r") as f: - file = f.read() + file = f.read() for nixTuple in systems: - file = file.replace(systems[nixTuple]["oldHash"], systems[nixTuple]["newHash"]) + file = file.replace(systems[nixTuple]["oldHash"], systems[nixTuple]["newHash"]) with open(join(dirname(__file__), "package.nix"), "w") as f: - f.write(file) + f.write(file) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main__.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main__.py index 5db025f72010d..142847f472bff 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main__.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main__.py @@ -22,9 +22,7 @@ def main_impl(file_path): debug("exclude_paths", exclude_paths) result = flatten_references_graph( - references_graph, - pipeline, - exclude_paths=exclude_paths + references_graph, pipeline, exclude_paths=exclude_paths ) debug("result", result) @@ -35,7 +33,7 @@ def main_impl(file_path): sort_keys=True, indent=2, # Avoid tailing whitespaces. 
- separators=(",", ": ") + separators=(",", ": "), ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main___test.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main___test.py index d04e44bd4af2f..bdf0bed4fb776 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main___test.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/__main___test.py @@ -9,12 +9,9 @@ class TestMain(unittest.TestCase): - def test_main_impl(self): - file_path = path_relative_to_file( - __file__, - "__test_fixtures/flatten-references-graph-main-input.json" + __file__, "__test_fixtures/flatten-references-graph-main-input.json" ) result = main_impl(file_path) @@ -35,18 +32,15 @@ def test_main_impl(self): ] ] """ - ) + ), ) def test_main_impl2(self): file_path = path_relative_to_file( __file__, - "__test_fixtures/flatten-references-graph-main-input-no-paths.json" + "__test_fixtures/flatten-references-graph-main-input-no-paths.json", ) result = main_impl(file_path) - self.assertEqual( - result, - inspect.cleandoc("[]") - ) + self.assertEqual(result, inspect.cleandoc("[]")) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph.py index ac789022a32b0..d13f2044de3bc 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph.py @@ -1,10 +1,6 @@ from toolz import curried as tlz -from .lib import ( - flatten, - over, - references_graph_to_igraph -) +from .lib import flatten, over, references_graph_to_igraph from .pipe import pipe @@ -18,10 +14,7 @@ def create_list_of_lists_of_strings(deeply_nested_lists_or_dicts_of_graphs): filter( # remove empty layers lambda xs: len(xs) > 0, - tlz.map( - lambda g: g.vs["name"], - list_of_graphs - ) + tlz.map(lambda g: g.vs["name"], list_of_graphs), ) ) @@ -30,16 +23,12 @@ def flatten_references_graph(references_graph, pipeline, exclude_paths=None): if exclude_paths is not None: exclude_paths = frozenset(exclude_paths) references_graph = tlz.compose( - tlz.map(over( - "references", - lambda xs: frozenset(xs).difference(exclude_paths) - )), - tlz.remove(lambda node: node["path"] in exclude_paths) + tlz.map( + over("references", lambda xs: frozenset(xs).difference(exclude_paths)) + ), + tlz.remove(lambda node: node["path"] in exclude_paths), )(references_graph) igraph_graph = references_graph_to_igraph(references_graph) - return create_list_of_lists_of_strings(pipe( - pipeline, - igraph_graph - )) + return create_list_of_lists_of_strings(pipe(pipeline, igraph_graph)) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph_test.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph_test.py index c39c663d038e2..80004a502bbb3 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph_test.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/flatten_references_graph_test.py @@ -14,41 +14,33 @@ "references": [ "A", "C", - ] + ], }, { "closureSize": 3, "narHash": "sha256:b", "narSize": 4, "path": "B", - "references": [ - "C", - "D" - ] + "references": ["C", "D"], }, { "closureSize": 5, "narHash": "sha256:c", "narSize": 6, "path": 
"C", - "references": [ - "C" - ] + "references": ["C"], }, { "closureSize": 7, "narHash": "sha256:d", "narSize": 8, "path": "D", - "references": [ - "D" - ] - } + "references": ["D"], + }, ] class Test(unittest.TestCase): - def test_flatten_references_graph(self): pipeline = [ ["split_paths", ["B"]], @@ -64,8 +56,8 @@ def test_flatten_references_graph(self): # Common deps ["C"], # Rest (without common deps) - ["A"] - ] + ["A"], + ], ) pipeline = [ @@ -75,15 +67,7 @@ def test_flatten_references_graph(self): result = flatten_references_graph(references_graph, pipeline) - self.assertEqual( - result, - [ - ["B"], - ["D"], - ["C"], - ["A"] - ] - ) + self.assertEqual(result, [["B"], ["D"], ["C"], ["A"]]) def test_flatten_references_graph_exclude_paths(self): pipeline = [ @@ -91,9 +75,7 @@ def test_flatten_references_graph_exclude_paths(self): ] result = flatten_references_graph( - references_graph, - pipeline, - exclude_paths=["A"] + references_graph, pipeline, exclude_paths=["A"] ) self.assertEqual( @@ -101,13 +83,11 @@ def test_flatten_references_graph_exclude_paths(self): [ # A was excluded so there is no "rest" or "common" layer ["B", "C", "D"] - ] + ], ) result = flatten_references_graph( - references_graph, - pipeline, - exclude_paths=["D"] + references_graph, pipeline, exclude_paths=["D"] ) self.assertEqual( @@ -116,6 +96,6 @@ def test_flatten_references_graph_exclude_paths(self): # D removed from this layer ["B"], ["C"], - ["A"] - ] + ["A"], + ], ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib.py index e3277177f401a..3b1c3e7d77931 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib.py @@ -36,7 +36,8 @@ def debug_plot(graph, name, **kwargs): ] save_as = ( - None if DEBUG_PLOT_SAVE_BASE_NAME is None + None + if DEBUG_PLOT_SAVE_BASE_NAME is None else DEBUG_PLOT_SAVE_BASE_NAME + name + ".png" ) @@ -44,19 +45,21 @@ def debug_plot(graph, name, **kwargs): graph, save_as, vertex_label=vertex_label, - **(tlz.merge( - { - # "bbox": (3840, 2160), - "bbox": (800, 600), - "margin": 100, - "vertex_label_dist": -5, - "edge_color": "orange", - "vertex_size": 20, - "vertex_label_size": 30, - "edge_arrow_size": 2 - }, - kwargs - )), + **( + tlz.merge( + { + # "bbox": (3840, 2160), + "bbox": (800, 600), + "margin": 100, + "vertex_label_dist": -5, + "edge_color": "orange", + "vertex_size": 20, + "vertex_label_size": 30, + "edge_arrow_size": 2, + }, + kwargs, + ) + ), ) @@ -65,18 +68,13 @@ def debug_plot_with_highligth(g, vs, layout): g, layout=layout, # layout=Layout(new_coords), - vertex_color=[ - "green" if v.index in vs else "red" - for v in g.vs - ] + vertex_color=["green" if v.index in vs else "red" for v in g.vs], ) @curry def pick_keys(keys, d): - return { - key: d[key] for key in keys if key in d - } + return {key: d[key] for key in keys if key in d} def unnest_iterable(xs): @@ -107,10 +105,7 @@ def subcomponent_multi(graph, vertices, mode="out"): """Return concatenated subcomponents generated by the given list of vertices. 
""" - return tlz.mapcat( - lambda vertex: graph.subcomponent(vertex, mode=mode), - vertices - ) + return tlz.mapcat(lambda vertex: graph.subcomponent(vertex, mode=mode), vertices) @curry @@ -122,17 +117,14 @@ def edges_for_reference_graph_node(path_to_size_dict, reference_graph_node): filter( # references might contain source lambda x: x != source, - reference_graph_node["references"] + reference_graph_node["references"], ), - key=lambda x: 1 * path_to_size_dict[x] - ) + key=lambda x: 1 * path_to_size_dict[x], + ), ) -reference_graph_node_keys_to_keep = [ - "closureSize", - "narSize" -] +reference_graph_node_keys_to_keep = ["closureSize", "narSize"] pick_reference_graph_node_keys = pick_keys(reference_graph_node_keys_to_keep) @@ -140,7 +132,7 @@ def edges_for_reference_graph_node(path_to_size_dict, reference_graph_node): def vertex_from_reference_graph_node(reference_graph_node): return tlz.merge( {"name": reference_graph_node["path"]}, - pick_reference_graph_node_keys(reference_graph_node) + pick_reference_graph_node_keys(reference_graph_node), ) @@ -150,7 +142,7 @@ def references_graph_to_igraph(references_graph): Uses paths as igraph node names, and sets closureSize and narSize as properties of igraph nodes. """ - debug('references_graph', references_graph) + debug("references_graph", references_graph) references_graph = sorted(references_graph, key=lambda x: 1 * x["narSize"]) # Short circuit since DictList throws an error if first argument (vertices) @@ -161,19 +153,16 @@ def references_graph_to_igraph(references_graph): if len(references_graph) == 0: return empty_directed_graph() - path_to_size_dict = { - node["path"]: node["narSize"] for node in references_graph - } + path_to_size_dict = {node["path"]: node["narSize"] for node in references_graph} - debug('path_to_size_dict', path_to_size_dict) + debug("path_to_size_dict", path_to_size_dict) return igraph.Graph.DictList( map(vertex_from_reference_graph_node, references_graph), - unnest_iterable(map( - edges_for_reference_graph_node(path_to_size_dict), - references_graph - )), - directed=True + unnest_iterable( + map(edges_for_reference_graph_node(path_to_size_dict), references_graph) + ), + directed=True, ) @@ -187,12 +176,14 @@ def igraph_to_reference_graph(igraph_instance): tlz.merge( { "path": v["name"], - "references": list(map( - graph_vertex_index_to_name(igraph_instance), - igraph_instance.successors(v.index) - )) + "references": list( + map( + graph_vertex_index_to_name(igraph_instance), + igraph_instance.successors(v.index), + ) + ), }, - pick_reference_graph_node_keys(v.attributes()) + pick_reference_graph_node_keys(v.attributes()), ) for v in igraph_instance.vs ] @@ -239,10 +230,10 @@ def directed_graph(edges, vertices=None, vertex_attrs=[]): graph = graph + vertices # Add vertex attributes if any. - for (name, attrs_dict) in vertex_attrs: + for name, attrs_dict in vertex_attrs: vertex = graph.vs.find(name) - for (k, v) in attrs_dict.items(): + for k, v in attrs_dict.items(): vertex[k] = v return graph @@ -288,10 +279,7 @@ def flatten(xs): @curry def split_every(count, graph): vs = graph.vs - return [ - graph.induced_subgraph(vs[x:x + count]) - for x in range(0, len(vs), count) - ] + return [graph.induced_subgraph(vs[x : x + count]) for x in range(0, len(vs), count)] @curry @@ -300,12 +288,14 @@ def limit_layers(max_count, graphs): graphs_iterator = iter(graphs) - return tlz.concat([ - tlz.take(max_count - 1, graphs_iterator), - # Merges all graphs remaining in the iterator, after initial - # max_count - 1 have been taken. 
- (lambda: (yield merge_graphs(graphs_iterator)))() - ]) + return tlz.concat( + [ + tlz.take(max_count - 1, graphs_iterator), + # Merges all graphs remaining in the iterator, after initial + # max_count - 1 have been taken. + (lambda: (yield merge_graphs(graphs_iterator)))(), + ] + ) @curry @@ -318,7 +308,7 @@ def remove_paths(paths, graph): list, tlz.map(lambda v: v.index), tlz.remove(is_None), - tlz.map(find_vertex_by_name_or_none(graph)) + tlz.map(find_vertex_by_name_or_none(graph)), )(paths) return graph - indices_to_remove if len(indices_to_remove) > 0 else graph diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib_test.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib_test.py index 49099e7f0b5b3..b975aacd98d02 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib_test.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/lib_test.py @@ -10,7 +10,7 @@ limit_layers, pick_keys, references_graph_to_igraph, - reference_graph_node_keys_to_keep + reference_graph_node_keys_to_keep, ) if __name__ == "__main__": @@ -23,27 +23,21 @@ "narHash": "sha256:d", "narSize": 0, "path": "D", - "references": [ - "D" - ] + "references": ["D"], }, { "closureSize": 3, "narHash": "sha256:b", "narSize": 4, "path": "B", - "references": [ - "B" - ] + "references": ["B"], }, { "closureSize": 3, "narHash": "sha256:e", "narSize": 5, "path": "E", - "references": [ - "E" - ] + "references": ["E"], }, { "closureSize": 1, @@ -54,35 +48,27 @@ # most of the time references contain self path, but not always. "C", "B", - ] + ], }, { "closureSize": 5, "narHash": "sha256:c", "narSize": 6, "path": "C", - "references": [ - "C", - "E", - "D" - ] + "references": ["C", "E", "D"], }, { "closureSize": 5, "narHash": "sha256:f", "narSize": 2, "path": "F", - "references": [ - "F" - ] - } + "references": ["F"], + }, ] class TestLib(unittest.TestCase, th.CustomAssertions): - def test_references_graph_to_igraph(self): - graph = references_graph_to_igraph(references_graph) pick_preserved_keys = pick_keys(reference_graph_node_keys_to_keep) @@ -100,20 +86,19 @@ def test_references_graph_to_igraph(self): # Add "narSize" and "closureSize" attributes to each node. map( lambda node: (node["path"], pick_preserved_keys(node)), - references_graph - ) - ) + references_graph, + ), + ), ) def test_references_graph_to_igraph_one_node(self): - references_graph = [ { - 'closureSize': 168, - 'narHash': 'sha256:0dl4', - 'narSize': 168, - 'path': 'A', - 'references': [] + "closureSize": 168, + "narHash": "sha256:0dl4", + "narSize": 168, + "path": "A", + "references": [], } ] @@ -129,59 +114,41 @@ def test_references_graph_to_igraph_one_node(self): # Add "narSize" and "closureSize" attributes to each node. 
map( lambda node: (node["path"], pick_preserved_keys(node)), - references_graph - ) - ) + references_graph, + ), + ), ) def test_references_graph_to_igraph_zero_nodes(self): - references_graph = [] graph = references_graph_to_igraph(references_graph) - self.assertGraphEqual( - graph, - directed_graph( - [], - [], - [] - ) - ) + self.assertGraphEqual(graph, directed_graph([], [], [])) def test_igraph_to_reference_graph(self): - graph = references_graph_to_igraph(references_graph) - nodes_by_path = { - node["path"]: node for node in references_graph - } + nodes_by_path = {node["path"]: node for node in references_graph} result = igraph_to_reference_graph(graph) - self.assertEqual( - len(result), - len(references_graph) - ) + self.assertEqual(len(result), len(references_graph)) - pick_preserved_keys = pick_keys([ - "path", - *reference_graph_node_keys_to_keep - ]) + pick_preserved_keys = pick_keys(["path", *reference_graph_node_keys_to_keep]) for node in result: original_node = nodes_by_path[node["path"]] self.assertDictEqual( - pick_preserved_keys(original_node), - pick_preserved_keys(node) + pick_preserved_keys(original_node), pick_preserved_keys(node) ) revove_self_ref = tlz.remove(lambda a: a == node["path"]) self.assertListEqual( sorted(node["references"]), - sorted(revove_self_ref(original_node["references"])) + sorted(revove_self_ref(original_node["references"])), ) def test_limit_layers_nothing_to_do(self): @@ -191,9 +158,6 @@ def test_limit_layers_nothing_to_do(self): result = limit_layers(1, layers) result_list = list(result) - self.assertEqual( - len(result_list), - 1 - ) + self.assertEqual(len(result_list), 1) self.assertGraphEqual(graph, result_list[0]) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe.py index f4d31397756c0..ef7ad3642f13a 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe.py @@ -9,33 +9,26 @@ from .lib import ( # references_graph_to_igraph debug, - pick_attrs + pick_attrs, ) funcs = tlz.merge( pick_attrs( - [ - "flatten", - "over", - "split_every", - "limit_layers", - "remove_paths", - "reverse" - ], - lib + ["flatten", "over", "split_every", "limit_layers", "remove_paths", "reverse"], + lib, ), pick_attrs( [ "subcomponent_in", "subcomponent_out", ], - subcomponent + subcomponent, ), { "split_paths": split_paths, "popularity_contest": popularity_contest, - "map": tlz.map - } + "map": tlz.map, + }, ) @@ -70,11 +63,8 @@ def preapply_func(func_call_data): def pipe(pipeline, data): debug("pipeline", pipeline) partial_funcs = list(tlz.map(preapply_func, pipeline)) - debug('partial_funcs', partial_funcs) - return tlz.pipe( - data, - *partial_funcs - ) + debug("partial_funcs", partial_funcs) + return tlz.pipe(data, *partial_funcs) funcs["pipe"] = pipe diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe_test.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe_test.py index ab03fa2db66d8..673fd6911aad8 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe_test.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/pipe_test.py @@ -32,24 +32,15 @@ class CustomAssertions: def runAndAssertResult(self, graph, pipeline, expected_graph_args): result = list(pipe(pipeline, graph)) - for (index, expected_graph_arg) in 
enumerate(expected_graph_args): - - self.assertGraphEqual( - directed_graph(*expected_graph_arg), - result[index] - ) + for index, expected_graph_arg in enumerate(expected_graph_args): + self.assertGraphEqual(directed_graph(*expected_graph_arg), result[index]) if __name__ == "__main__": unittest.main() -class Test( - unittest.TestCase, - CustomAssertions, - th.CustomAssertions -): - +class Test(unittest.TestCase, CustomAssertions, th.CustomAssertions): def test_1(self): pipeline = [ ["split_paths", ["B"]], @@ -60,13 +51,9 @@ def test_1(self): "pipe", [ ["subcomponent_in", ["B"]], - [ - "over", - "rest", - ["popularity_contest"] - ] - ] - ] + ["over", "rest", ["popularity_contest"]], + ], + ], ], ["flatten"], ["map", ["remove_paths", "Root3"]], @@ -84,20 +71,10 @@ def test_1(self): ([], ["E"]), # "rest" output of "split_paths" stage with "G" merged into it by # "limit_layers" stage. - ( - [ - ("Root1", "A"), - ("A", "C") - ], - ["Root2", "G"] - ) + ([("Root1", "A"), ("A", "C")], ["Root2", "G"]), ] - self.runAndAssertResult( - make_test_graph(), - pipeline, - expected_graph_args - ) + self.runAndAssertResult(make_test_graph(), pipeline, expected_graph_args) def test_2(self): graph = directed_graph( @@ -105,7 +82,7 @@ def test_2(self): ("Root1", "A"), ("A", "B"), ], - ["Root2"] + ["Root2"], ) self.runAndAssertResult( graph, @@ -117,8 +94,8 @@ def test_2(self): ([], ["B"]), ([], ["A"]), ([], ["Root1"]), - ([], ["Root2"]) - ] + ([], ["Root2"]), + ], ) self.runAndAssertResult( @@ -133,7 +110,7 @@ def test_2(self): ([], ["A"]), # Least popular combined ([], ["Root1", "Root2"]), - ] + ], ) self.runAndAssertResult( @@ -148,6 +125,6 @@ def test_2(self): ([], ["Root2"]), ([], ["Root1"]), # Most popular first - ([], ["A", "B"]) - ] + ([], ["A", "B"]), + ], ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest.py index c8447d6554795..f125dc6062c99 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest.py @@ -117,7 +117,7 @@ igraph_to_reference_graph, over, pick_keys, - reference_graph_node_keys_to_keep + reference_graph_node_keys_to_keep, ) eq = curry(eq) @@ -128,11 +128,11 @@ # Find paths in the original dataset which are never referenced by # any other paths def find_roots(closures): - debug('closures', closures) + debug("closures", closures) roots = [] for closure in closures: - path = closure['path'] + path = closure["path"] if not any_refer_to(path, closures): roots.append(path) @@ -141,8 +141,8 @@ def find_roots(closures): def any_refer_to(path, closures): for closure in closures: - if path != closure['path']: - if path in closure['references']: + if path != closure["path"]: + if path in closure["references"]: return True return False @@ -150,8 +150,8 @@ def any_refer_to(path, closures): def all_paths(closures): paths = [] for closure in closures: - paths.append(closure['path']) - paths.extend(closure['references']) + paths.append(closure["path"]) + paths.extend(closure["references"]) paths.sort() return list(set(paths)) @@ -252,6 +252,7 @@ def make_graph_segment_from_root(subgraphs_cache, root, lookup): # /nix/store/tux: 6 # ] + def graph_popularity_contest(popularity_cache, full_graph): popularity = defaultdict(int) for path, subgraph in full_graph.items(): @@ -277,6 +278,7 @@ def 
graph_popularity_contest(popularity_cache, full_graph): return popularity + # Emit a list of packages by popularity, most first: # # From: @@ -310,12 +312,12 @@ def order_by_popularity(paths): def package_name(path): - parts = path.split('-') + parts = path.split("-") start = parts.pop(0) # don't throw away any data, so the order is always the same. # even in cases where only the hash at the start has changed. parts.append(start) - return '-'.join(parts) + return "-".join(parts) @curry @@ -344,7 +346,7 @@ def popularity_contest(graph): # with v["name"] == path, and some properties (defined in # reference_graph_node_keys_to_keep) from the nodes of the input graph # copied as vertex attributes. - debug('graph', graph) + debug("graph", graph) if isinstance(graph, igraph.Graph): graph = igraph_to_reference_graph(graph) @@ -360,12 +362,7 @@ def popularity_contest(graph): for root in roots: debug("Making full graph for", root) full_graph[root] = make_graph_segment_from_root( - subgraphs_cache, - root, - tlz.valmap( - tlz.get("references"), - lookup - ) + subgraphs_cache, root, tlz.valmap(tlz.get("references"), lookup) ) debug("Running contest") @@ -391,7 +388,7 @@ def popularity_contest(graph): # One vertex, with name=path [path], # Setting desired attributes on the vertex. - [(path, pick_keys_to_keep(lookup[path]))] + [(path, pick_keys_to_keep(lookup[path]))], ), - ordered + ordered, ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest_test.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest_test.py index 5ed58eee41969..59f0b39335c8b 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest_test.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/popularity_contest_test.py @@ -12,14 +12,10 @@ make_graph_segment_from_root, make_lookup, popularity_contest, - order_by_popularity + order_by_popularity, ) -from .lib import ( - directed_graph, - igraph_to_reference_graph, - over -) +from .lib import directed_graph, igraph_to_reference_graph, over if __name__ == "__main__": @@ -29,26 +25,15 @@ class CustomAssertions: @curry def assertResultKeys(self, keys, result): - self.assertListEqual( - list(result.keys()), - keys - ) + self.assertListEqual(list(result.keys()), keys) return result -class Test( - unittest.TestCase, - CustomAssertions, - th.CustomAssertions -): - +class Test(unittest.TestCase, CustomAssertions, th.CustomAssertions): def test_empty_graph(self): def test_empty(graph): - self.assertListEqual( - list(popularity_contest(graph)), - [] - ) + self.assertListEqual(list(popularity_contest(graph)), []) # popularity_contest works with igraph graph or refurence_graph in # form a list of dicts (as returned by nix's exportReferencesGraph) @@ -70,7 +55,7 @@ def test_popularity_contest(self): ("B", "D"), ("B", "F"), ("Root2", "B"), - ("Root3", "C") + ("Root3", "C"), ] detached_vertices = ["X"] vertex_props = vertex_props_dict.items() @@ -79,29 +64,25 @@ def test(graph): result = list(popularity_contest(graph)) expected_paths = [ - 'E', - 'D', - 'F', - 'B', - 'A', - 'C', - 'Root1', - 'Root2', - 'Root3', - 'X' + "E", + "D", + "F", + "B", + "A", + "C", + "Root1", + "Root2", + "Root3", + "X", ] - self.assertEqual( - len(result), - len(expected_paths) - ) + self.assertEqual(len(result), len(expected_paths)) - for (index, path) in enumerate(expected_paths): + for index, path in enumerate(expected_paths): path_props = vertex_props_dict.get(path) or {} 
self.assertGraphEqual( - result[index], - directed_graph([], [path], [(path, path_props)]) + result[index], directed_graph([], [path], [(path, path_props)]) ) graph = directed_graph(edges, detached_vertices, vertex_props) @@ -113,28 +94,20 @@ def test(graph): class TestFindRoots(unittest.TestCase): def test_find_roots(self): self.assertCountEqual( - find_roots([ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] - }, - { - "path": "/nix/store/bar", - "references": [ - "/nix/store/bar", - "/nix/store/tux" - ] - }, - { - "path": "/nix/store/hello", - "references": [ - ] - } - ]), - ["/nix/store/foo", "/nix/store/hello"] + find_roots( + [ + { + "path": "/nix/store/foo", + "references": ["/nix/store/foo", "/nix/store/bar"], + }, + { + "path": "/nix/store/bar", + "references": ["/nix/store/bar", "/nix/store/tux"], + }, + {"path": "/nix/store/hello", "references": []}, + ] + ), + ["/nix/store/foo", "/nix/store/hello"], ) @@ -144,13 +117,8 @@ def test_has_references(self): any_refer_to( "/nix/store/bar", [ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/bar" - ] - }, - ] + {"path": "/nix/store/foo", "references": ["/nix/store/bar"]}, + ], ), ) @@ -161,12 +129,9 @@ def test_no_references(self): [ { "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] + "references": ["/nix/store/foo", "/nix/store/bar"], }, - ] + ], ), ) @@ -174,29 +139,25 @@ def test_no_references(self): class TestAllPaths(unittest.TestCase): def test_returns_all_paths(self): self.assertCountEqual( - all_paths([ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] - }, - { - "path": "/nix/store/bar", - "references": [ - "/nix/store/bar", - "/nix/store/tux" - ] - }, - { - "path": "/nix/store/hello", - "references": [ - ] - } - ]), - ["/nix/store/foo", "/nix/store/bar", - "/nix/store/hello", "/nix/store/tux", ] + all_paths( + [ + { + "path": "/nix/store/foo", + "references": ["/nix/store/foo", "/nix/store/bar"], + }, + { + "path": "/nix/store/bar", + "references": ["/nix/store/bar", "/nix/store/tux"], + }, + {"path": "/nix/store/hello", "references": []}, + ] + ), + [ + "/nix/store/foo", + "/nix/store/bar", + "/nix/store/hello", + "/nix/store/tux", + ], ) def test_no_references(self): @@ -206,12 +167,9 @@ def test_no_references(self): [ { "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar" - ] + "references": ["/nix/store/foo", "/nix/store/bar"], }, - ] + ], ), ) @@ -221,115 +179,102 @@ def test_returns_lookp(self): self.assertDictEqual( # "references" in the result are iterators so we need # to convert them to a list before asserting. 
- tlz.valmap(over("references", list), make_lookup([ - { - "path": "/nix/store/foo", - "references": [ - "/nix/store/foo", - "/nix/store/bar", - "/nix/store/hello" - ] - }, - { - "path": "/nix/store/bar", - "references": [ - "/nix/store/bar", - "/nix/store/tux" + tlz.valmap( + over("references", list), + make_lookup( + [ + { + "path": "/nix/store/foo", + "references": [ + "/nix/store/foo", + "/nix/store/bar", + "/nix/store/hello", + ], + }, + { + "path": "/nix/store/bar", + "references": ["/nix/store/bar", "/nix/store/tux"], + }, + {"path": "/nix/store/hello", "references": []}, ] - }, - { - "path": "/nix/store/hello", - "references": [ - ] - } - ])), + ), + ), { "/nix/store/foo": { "path": "/nix/store/foo", - "references": [ - "/nix/store/bar", - "/nix/store/hello" - ] + "references": ["/nix/store/bar", "/nix/store/hello"], }, "/nix/store/bar": { "path": "/nix/store/bar", - "references": [ - "/nix/store/tux" - ] + "references": ["/nix/store/tux"], }, - "/nix/store/hello": { - "path": "/nix/store/hello", - "references": [ - ] - } - } + "/nix/store/hello": {"path": "/nix/store/hello", "references": []}, + }, ) class TestMakeGraphSegmentFromRoot(unittest.TestCase): def test_returns_graph(self): self.assertDictEqual( - make_graph_segment_from_root({}, "/nix/store/foo", { - "/nix/store/foo": ["/nix/store/bar"], - "/nix/store/bar": ["/nix/store/tux"], - "/nix/store/tux": [], - "/nix/store/hello": [], - }), - { - "/nix/store/bar": { - "/nix/store/tux": {} - } - } + make_graph_segment_from_root( + {}, + "/nix/store/foo", + { + "/nix/store/foo": ["/nix/store/bar"], + "/nix/store/bar": ["/nix/store/tux"], + "/nix/store/tux": [], + "/nix/store/hello": [], + }, + ), + {"/nix/store/bar": {"/nix/store/tux": {}}}, ) def test_returns_graph_tiny(self): self.assertDictEqual( - make_graph_segment_from_root({}, "/nix/store/tux", { - "/nix/store/foo": ["/nix/store/bar"], - "/nix/store/bar": ["/nix/store/tux"], - "/nix/store/tux": [], - }), - {} + make_graph_segment_from_root( + {}, + "/nix/store/tux", + { + "/nix/store/foo": ["/nix/store/bar"], + "/nix/store/bar": ["/nix/store/tux"], + "/nix/store/tux": [], + }, + ), + {}, ) class TestGraphPopularityContest(unittest.TestCase): def test_counts_popularity(self): self.assertDictEqual( - graph_popularity_contest({}, { - "/nix/store/foo": { - "/nix/store/bar": { - "/nix/store/baz": { - "/nix/store/tux": {} - } - }, - "/nix/store/baz": { - "/nix/store/tux": {} + graph_popularity_contest( + {}, + { + "/nix/store/foo": { + "/nix/store/bar": {"/nix/store/baz": {"/nix/store/tux": {}}}, + "/nix/store/baz": {"/nix/store/tux": {}}, } - } - }), + }, + ), { "/nix/store/foo": 1, "/nix/store/bar": 2, "/nix/store/baz": 4, "/nix/store/tux": 6, - } + }, ) class TestOrderByPopularity(unittest.TestCase): def test_returns_in_order(self): self.assertEqual( - order_by_popularity({ - "/nix/store/foo": 1, - "/nix/store/bar": 1, - "/nix/store/baz": 2, - "/nix/store/tux": 2, - }), - [ - "/nix/store/baz", - "/nix/store/tux", - "/nix/store/bar", - "/nix/store/foo" - ] + order_by_popularity( + { + "/nix/store/foo": 1, + "/nix/store/bar": 1, + "/nix/store/baz": 2, + "/nix/store/tux": 2, + } + ), + ["/nix/store/baz", "/nix/store/tux", "/nix/store/bar", "/nix/store/foo"], ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths.py index 31595c950c9ac..8c594d4ff897e 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths.py +++ 
b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths.py @@ -9,7 +9,7 @@ graph_is_empty, is_None, subcomponent_multi, - unnest_iterable + unnest_iterable, ) @@ -45,16 +45,12 @@ def remove_vertex(vertex_name, graph): def get_children_of(graph, vertex_names): - return unnest_iterable(map( - graph.successors, - tlz.remove( - is_None, - map( - find_vertex_by_name_or_none(graph), - vertex_names - ) + return unnest_iterable( + map( + graph.successors, + tlz.remove(is_None, map(find_vertex_by_name_or_none(graph), vertex_names)), ) - )) + ) def as_list(x): @@ -72,7 +68,7 @@ def split_path_spec_to_indices(graph, split_path_spec): else: raise Exception( "Unexpected split path spec: dict with invalid keys." - "Valid: [\"children_of\"]" + 'Valid: ["children_of"]' ) else: vertex = find_vertex_by_name_or_none(graph)(split_path_spec) @@ -96,10 +92,9 @@ def split_paths(split_paths, graph_in): # Convert list of split_paths into list of vertex indices. Ignores # split_paths which don"t match any vertices in the graph. # All edges pointing at the indices will be deleted from the graph. - split_path_indices = list(unnest_iterable(map( - split_path_spec_to_indices(graph_in), - split_paths - ))) + split_path_indices = list( + unnest_iterable(map(split_path_spec_to_indices(graph_in), split_paths)) + ) debug("split_path_indices:", split_path_indices) @@ -107,7 +102,7 @@ def split_paths(split_paths, graph_in): # vertices in the graph). if len(split_path_indices) == 0: if DEBUG_PLOT: - layout = graph_in.layout('tree') + layout = graph_in.layout("tree") debug_plot(graph_in, f"{graph_name_prefix}input", layout=layout) debug_plot(graph_in, f"{graph_name_prefix}result", layout=layout) @@ -121,18 +116,15 @@ def split_paths(split_paths, graph_in): debug("root_name", root_name) - if ( - find_vertex_by_name_or_none(graph)(root_name).index - in split_path_indices - ): + if find_vertex_by_name_or_none(graph)(root_name).index in split_path_indices: if DEBUG_PLOT: - layout = graph_in.layout('tree') + layout = graph_in.layout("tree") debug_plot(graph_in, f"{graph_name_prefix}input", layout=layout) debug_plot( graph_in, f"{graph_name_prefix}result", layout=layout, - vertex_color="green" + vertex_color="green", ) return {"main": graph_in} @@ -143,14 +135,13 @@ def split_paths(split_paths, graph_in): graph = graph if graph is not graph_in else graph.copy() if DEBUG_PLOT: - layout = graph.layout('tree') + layout = graph.layout("tree") debug_plot(graph, f"{graph_name_prefix}input", layout=layout) # Get incidences of all vertices which can be reached split_path_indices # (including split_path_indices). This is a set of all split_paths and their # dependencies. - split_off_vertex_indices = frozenset( - subcomponent_multi(graph, split_path_indices)) + split_off_vertex_indices = frozenset(subcomponent_multi(graph, split_path_indices)) debug("split_off_vertex_indices", split_off_vertex_indices) # Delete edges which point at any of the vertices in split_path_indices. 
@@ -182,10 +173,11 @@ def split_paths(split_paths, graph_in): debug("split_off_without_common", split_off_without_common) if DEBUG_PLOT: + def choose_color(index): - if (index in split_off_without_common): + if index in split_off_without_common: return "green" - elif (index in rest_without_common): + elif index in rest_without_common: return "red" else: return "purple" @@ -196,7 +188,7 @@ def choose_color(index): graph, f"{graph_name_prefix}result", layout=layout, - vertex_color=vertex_color + vertex_color=vertex_color, ) # Return subgraphs based on calculated sets of vertices. @@ -212,16 +204,19 @@ def choose_color(index): graph.induced_subgraph(rest_without_common), ] - debug('result_values', result_values[0].vs["name"]) + debug("result_values", result_values[0].vs["name"]) return tlz.valfilter( tlz.complement(graph_is_empty), - dict(zip( - result_keys, - ( - result_values if root_name != fake_root_name - # If root was added, remove it - else tlz.map(remove_vertex(fake_root_name), result_values) + dict( + zip( + result_keys, + ( + result_values + if root_name != fake_root_name + # If root was added, remove it + else tlz.map(remove_vertex(fake_root_name), result_values) + ), ) - )) + ), ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths_test.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths_test.py index a3c129f7b3826..5f62295e144e1 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths_test.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/split_paths_test.py @@ -3,14 +3,9 @@ from . import test_helpers as th -from .split_paths import ( - split_paths -) +from .split_paths import split_paths -from .lib import ( - directed_graph, - pick_keys -) +from .lib import directed_graph, pick_keys if __name__ == "__main__": @@ -18,11 +13,7 @@ # Making sure vertex attrs are preserved. 
-vertex_props_dict = { - "Root1": {"a": 1, "b": 1}, - "B": {"b": 2}, - "X": {"x": 3} -} +vertex_props_dict = {"Root1": {"a": 1, "b": 1}, "B": {"b": 2}, "X": {"x": 3}} def make_test_graph(): @@ -34,7 +25,7 @@ def make_test_graph(): ("B", "D"), ("B", "F"), ("Root2", "B"), - ("Root3", "C") + ("Root3", "C"), ] detached_vertices = ["X"] @@ -47,100 +38,65 @@ def make_test_graph(): class CustomAssertions: @curry def assertResultKeys(self, keys, result): - self.assertListEqual( - list(result.keys()), - keys - ) + self.assertListEqual(list(result.keys()), keys) return result -class Test( - unittest.TestCase, - CustomAssertions, - th.CustomAssertions -): - +class Test(unittest.TestCase, CustomAssertions, th.CustomAssertions): def test_empty_paths(self): input_graph = make_test_graph() - result = self.assertResultKeys( - ["rest"], - split_paths([], input_graph) - ) + result = self.assertResultKeys(["rest"], split_paths([], input_graph)) - self.assertGraphEqual( - result["rest"], - input_graph - ) + self.assertGraphEqual(result["rest"], input_graph) def test_empty_graph(self): empty_graph = directed_graph([]) def test_empty(paths): - result = self.assertResultKeys( - ["rest"], - split_paths(paths, empty_graph) - ) + result = self.assertResultKeys(["rest"], split_paths(paths, empty_graph)) - self.assertGraphEqual( - result["rest"], - empty_graph - ) + self.assertGraphEqual(result["rest"], empty_graph) test_empty([]) test_empty(["B"]) def test_split_paths_single(self): result = self.assertResultKeys( - ["main", "common", "rest"], - split_paths(["B"], make_test_graph()) + ["main", "common", "rest"], split_paths(["B"], make_test_graph()) ) self.assertGraphEqual( result["main"], directed_graph( - [ - ("B", "F") - ], - None, - pick_keys(["B"], vertex_props_dict).items() - ) + [("B", "F")], None, pick_keys(["B"], vertex_props_dict).items() + ), ) self.assertGraphEqual( result["rest"], directed_graph( - [ - ("Root1", "A"), - ("Root3", "C") - ], + [("Root1", "A"), ("Root3", "C")], ["Root2", "X"], - pick_keys(["Root1", "X"], vertex_props_dict).items() - ) + pick_keys(["Root1", "X"], vertex_props_dict).items(), + ), ) - self.assertGraphEqual( - result["common"], - directed_graph([("D", "E")]) - ) + self.assertGraphEqual(result["common"], directed_graph([("D", "E")])) def test_split_paths_multi(self): result = self.assertResultKeys( - ["main", "common", "rest"], - split_paths(["B", "Root3"], make_test_graph()) + ["main", "common", "rest"], split_paths(["B", "Root3"], make_test_graph()) ) self.assertGraphEqual( result["main"], directed_graph( - [ - ("B", "F"), - ("Root3", "C") - ], + [("B", "F"), ("Root3", "C")], None, - pick_keys(["B"], vertex_props_dict).items() - ) + pick_keys(["B"], vertex_props_dict).items(), + ), ) self.assertGraphEqual( @@ -148,25 +104,18 @@ def test_split_paths_multi(self): directed_graph( [("Root1", "A")], ["Root2", "X"], - pick_keys(["Root1", "X"], vertex_props_dict).items() - ) + pick_keys(["Root1", "X"], vertex_props_dict).items(), + ), ) - self.assertGraphEqual( - result["common"], - directed_graph([("D", "E")]) - ) + self.assertGraphEqual(result["common"], directed_graph([("D", "E")])) def test_split_no_common(self): result = self.assertResultKeys( - ["main", "rest"], - split_paths(["D"], make_test_graph()) + ["main", "rest"], split_paths(["D"], make_test_graph()) ) - self.assertGraphEqual( - result["main"], - directed_graph([("D", "E")]) - ) + self.assertGraphEqual(result["main"], directed_graph([("D", "E")])) self.assertGraphEqual( result["rest"], @@ -179,6 +128,6 @@ def 
test_split_no_common(self): ("Root3", "C"), ], ["X"], - pick_keys(["Root1", "B", "X"], vertex_props_dict).items() - ) + pick_keys(["Root1", "B", "X"], vertex_props_dict).items(), + ), ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent.py index 8c32127cb93dc..e9049227cd168 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent.py @@ -8,14 +8,11 @@ DEBUG_PLOT, find_vertex_by_name_or_none, is_None, - subcomponent_multi + subcomponent_multi, ) -call_counts = { - "in": 0, - "out": 0 -} +call_counts = {"in": 0, "out": 0} @curry @@ -25,24 +22,25 @@ def subcomponent(mode, paths, graph): graph_name_prefix = f"subcomponent_{mode}_{call_counts[mode]}_" call_counts[mode] += 1 - layout = graph.layout('tree') + layout = graph.layout("tree") debug_plot(graph, f"{graph_name_prefix}input", layout=layout) path_indices = tlz.compose( - tlz.map(attrgetter('index')), + tlz.map(attrgetter("index")), tlz.remove(is_None), - tlz.map(find_vertex_by_name_or_none(graph)) + tlz.map(find_vertex_by_name_or_none(graph)), )(paths) debug("path_indices", path_indices) main_indices = list(subcomponent_multi(graph, path_indices, mode)) - debug('main_indices', main_indices) + debug("main_indices", main_indices) if DEBUG_PLOT: + def choose_color(index): - if (index in main_indices): + if index in main_indices: return "green" else: return "red" @@ -53,13 +51,10 @@ def choose_color(index): graph, f"{graph_name_prefix}result", layout=layout, - vertex_color=vertex_color + vertex_color=vertex_color, ) - return { - "main": graph.induced_subgraph(main_indices), - "rest": graph - main_indices - } + return {"main": graph.induced_subgraph(main_indices), "rest": graph - main_indices} subcomponent_in = subcomponent("in") diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent_test.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent_test.py index 1a6138af0ba1c..34c795c2be7eb 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent_test.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/subcomponent_test.py @@ -2,16 +2,9 @@ from . import test_helpers as th -from .subcomponent import ( - subcomponent_out, - subcomponent_in -) +from .subcomponent import subcomponent_out, subcomponent_in -from .lib import ( - pick_keys, - directed_graph, - empty_directed_graph -) +from .lib import pick_keys, directed_graph, empty_directed_graph if __name__ == "__main__": @@ -19,11 +12,7 @@ # Making sure vertex attrs are preserved. 
-vertex_props_dict = { - "Root1": {"a": 1, "b": 1}, - "B": {"b": 2}, - "X": {"x": 3} -} +vertex_props_dict = {"Root1": {"a": 1, "b": 1}, "B": {"b": 2}, "X": {"x": 3}} def make_test_graph(): @@ -46,37 +35,21 @@ def make_test_graph(): class CustomAssertions: def assertResultKeys(self, result): - self.assertListEqual( - list(result.keys()), - ["main", "rest"] - ) + self.assertListEqual(list(result.keys()), ["main", "rest"]) return result -class Test( - unittest.TestCase, - CustomAssertions, - th.CustomAssertions -): - +class Test(unittest.TestCase, CustomAssertions, th.CustomAssertions): def test_empty_paths(self): def test(func): input_graph = make_test_graph() - result = self.assertResultKeys( - func([], input_graph) - ) + result = self.assertResultKeys(func([], input_graph)) - self.assertGraphEqual( - result["main"], - empty_directed_graph() - ) + self.assertGraphEqual(result["main"], empty_directed_graph()) - self.assertGraphEqual( - result["rest"], - input_graph - ) + self.assertGraphEqual(result["rest"], input_graph) test(subcomponent_out) test(subcomponent_in) @@ -86,19 +59,11 @@ def test(func): empty_graph = empty_directed_graph() def test_empty(paths): - result = self.assertResultKeys( - func(paths, empty_graph) - ) + result = self.assertResultKeys(func(paths, empty_graph)) - self.assertGraphEqual( - result["main"], - empty_graph - ) + self.assertGraphEqual(result["main"], empty_graph) - self.assertGraphEqual( - result["rest"], - empty_graph - ) + self.assertGraphEqual(result["rest"], empty_graph) test_empty([]) test_empty(["B"]) @@ -107,33 +72,24 @@ def test_empty(paths): test(subcomponent_in) def test_subcomponent_out(self): - result = self.assertResultKeys( - subcomponent_out(["B"], make_test_graph()) - ) + result = self.assertResultKeys(subcomponent_out(["B"], make_test_graph())) self.assertGraphEqual( result["main"], directed_graph( - [ - ("B", "D"), - ("B", "E") - ], + [("B", "D"), ("B", "E")], None, - pick_keys(["B"], vertex_props_dict).items() - ) + pick_keys(["B"], vertex_props_dict).items(), + ), ) self.assertGraphEqual( result["rest"], directed_graph( - [ - ("Root1", "A"), - ("A", "C"), - ("Root3", "C") - ], + [("Root1", "A"), ("A", "C"), ("Root3", "C")], ["Root2", "X"], - pick_keys(["Root1", "X"], vertex_props_dict).items() - ) + pick_keys(["Root1", "X"], vertex_props_dict).items(), + ), ) def test_subcomponent_out_multi(self): @@ -144,14 +100,10 @@ def test_subcomponent_out_multi(self): self.assertGraphEqual( result["main"], directed_graph( - [ - ("B", "D"), - ("B", "E"), - ("Root3", "C") - ], + [("B", "D"), ("B", "E"), ("Root3", "C")], None, - pick_keys(["B"], vertex_props_dict).items() - ) + pick_keys(["B"], vertex_props_dict).items(), + ), ) self.assertGraphEqual( @@ -159,26 +111,20 @@ def test_subcomponent_out_multi(self): directed_graph( [("Root1", "A")], ["Root2", "X"], - pick_keys(["Root1", "X"], vertex_props_dict).items() - ) + pick_keys(["Root1", "X"], vertex_props_dict).items(), + ), ) def test_subcomponent_in(self): - result = self.assertResultKeys( - subcomponent_in(["B"], make_test_graph()) - ) + result = self.assertResultKeys(subcomponent_in(["B"], make_test_graph())) self.assertGraphEqual( result["main"], directed_graph( - [ - ("Root1", "A"), - ("A", "B"), - ("Root2", "B") - ], + [("Root1", "A"), ("A", "B"), ("Root2", "B")], None, - pick_keys(["Root1", "B"], vertex_props_dict).items() - ) + pick_keys(["Root1", "B"], vertex_props_dict).items(), + ), ) self.assertGraphEqual( @@ -186,8 +132,8 @@ def test_subcomponent_in(self): directed_graph( [("Root3", 
"C")], ["D", "E", "X"], - pick_keys(["X"], vertex_props_dict).items() - ) + pick_keys(["X"], vertex_props_dict).items(), + ), ) def test_subcomponent_in_multi(self): @@ -204,16 +150,13 @@ def test_subcomponent_in_multi(self): ("Root2", "B"), ], ["Root3"], - pick_keys(["Root1", "B"], vertex_props_dict).items() - - ) + pick_keys(["Root1", "B"], vertex_props_dict).items(), + ), ) self.assertGraphEqual( result["rest"], directed_graph( - [], - ["C", "D", "E", "X"], - pick_keys(["X"], vertex_props_dict).items() - ) + [], ["C", "D", "E", "X"], pick_keys(["X"], vertex_props_dict).items() + ), ) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/test_helpers.py b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/test_helpers.py index 8bbe48a4c4e12..aed35a1eeb76e 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/test_helpers.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/flatten_references_graph/test_helpers.py @@ -1,33 +1,26 @@ from toolz import curried as tlz -from .lib import ( - not_None, - graph_vertex_index_to_name -) +from .lib import not_None, graph_vertex_index_to_name def edges_as_set(graph): return frozenset( ( graph_vertex_index_to_name(graph, e.source), - graph_vertex_index_to_name(graph, e.target) - ) for e in graph.es + graph_vertex_index_to_name(graph, e.target), + ) + for e in graph.es ) class CustomAssertions: def assertGraphEqual(self, g1, g2): - self.assertSetEqual( - frozenset(g1.vs["name"]), - frozenset(g2.vs["name"]) - ) + self.assertSetEqual(frozenset(g1.vs["name"]), frozenset(g2.vs["name"])) - self.assertSetEqual( - edges_as_set(g1), - edges_as_set(g2) - ) + self.assertSetEqual(edges_as_set(g1), edges_as_set(g2)) for name in g1.vs["name"]: + def get_vertex_attrs(g): return tlz.valfilter(not_None, g.vs.find(name).attributes()) diff --git a/pkgs/by-name/fl/flattenReferencesGraph/src/setup.py b/pkgs/by-name/fl/flattenReferencesGraph/src/setup.py index 6e862d64c4f76..484789a3b9ce9 100644 --- a/pkgs/by-name/fl/flattenReferencesGraph/src/setup.py +++ b/pkgs/by-name/fl/flattenReferencesGraph/src/setup.py @@ -5,13 +5,10 @@ version="0.1.0", author="Adrian Gierakowski", packages=["flatten_references_graph"], - install_requires=[ - "igraph", - "toolz" - ], + install_requires=["igraph", "toolz"], entry_points={ "console_scripts": [ "flatten_references_graph=flatten_references_graph.__main__:main" ] - } + }, ) diff --git a/pkgs/by-name/fl/flet-client-flutter/update-lockfiles.py b/pkgs/by-name/fl/flet-client-flutter/update-lockfiles.py index cc35d251bd4e0..b7b24dc01ce09 100644 --- a/pkgs/by-name/fl/flet-client-flutter/update-lockfiles.py +++ b/pkgs/by-name/fl/flet-client-flutter/update-lockfiles.py @@ -7,12 +7,19 @@ THIS_FOLDER = Path(__file__).parent FLAKE_DIR = THIS_FOLDER while True: - assert str(FLAKE_DIR) != '/' + assert str(FLAKE_DIR) != "/" if (FLAKE_DIR / "flake.nix").exists(): break FLAKE_DIR = FLAKE_DIR.parent -source = Path(subprocess.run(['nix-build', FLAKE_DIR, '-A', 'flet-client-flutter.src', '--no-out-link'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()) +source = Path( + subprocess.run( + ["nix-build", FLAKE_DIR, "-A", "flet-client-flutter.src", "--no-out-link"], + stdout=subprocess.PIPE, + ) + .stdout.decode("utf-8") + .strip() +) assert source.is_absolute() source_pubspec_lock = source / "client" / "pubspec.lock" @@ -20,23 +27,28 @@ output_pubspec = THIS_FOLDER / "pubspec.lock.json" output_git_hashes = THIS_FOLDER / "git_hashes.json" -data = 
yaml.safe_load(source_pubspec_lock.open('r')) +data = yaml.safe_load(source_pubspec_lock.open("r")) output_pubspec.write_text(json.dumps(data, indent=2) + "\n") output_data = {} + def hash_git(package): print(package) - resolved_ref = package['resolved-ref'] - url = package['url'] - full_output = subprocess.run(['nix-prefetch-git', '--url', url, '--rev', resolved_ref], stdout=subprocess.PIPE).stdout.decode('utf-8') + resolved_ref = package["resolved-ref"] + url = package["url"] + full_output = subprocess.run( + ["nix-prefetch-git", "--url", url, "--rev", resolved_ref], + stdout=subprocess.PIPE, + ).stdout.decode("utf-8") json_output = json.loads(full_output) - return json_output['hash'] + return json_output["hash"] + -for name, package in data['packages'].items(): - if package['source'] != 'git': +for name, package in data["packages"].items(): + if package["source"] != "git": continue - hash = hash_git(package['description']) + hash = hash_git(package["description"]) output_data[name] = hash output_git_hashes.write_text(json.dumps(output_data, indent=2) + "\n") diff --git a/pkgs/by-name/gc/gclient2nix/gclient2nix.py b/pkgs/by-name/gc/gclient2nix/gclient2nix.py index b26ef247d2e44..23d9d6d3be4ed 100755 --- a/pkgs/by-name/gc/gclient2nix/gclient2nix.py +++ b/pkgs/by-name/gc/gclient2nix/gclient2nix.py @@ -27,13 +27,16 @@ memory: Memory = Memory(user_cache_dir("gclient2nix"), verbose=0) + def cache(mem, **mem_kwargs): def cache_(f): f.__module__ = "gclient2nix" f.__qualname__ = f.__name__ return mem.cache(f, **mem_kwargs) + return cache_ + @cache(memory) def get_repo_hash(fetcher: str, args: dict) -> str: expr = f"(import {nixpkgs_path} {{}}).gclient2nix.fetchers.{fetcher}{{" @@ -60,7 +63,9 @@ def get_deps(self, repo_vars: dict, path: str) -> None: ) deps_file = self.get_file("DEPS") - evaluated = gclient_eval.Parse(deps_file, vars_override=repo_vars, filename="DEPS") + evaluated = gclient_eval.Parse( + deps_file, vars_override=repo_vars, filename="DEPS" + ) repo_vars = dict(evaluated.get("vars", {})) | repo_vars @@ -86,12 +91,29 @@ def eval(self) -> None: self.get_deps( { **{ - f"checkout_{platform}": platform == "linux" - for platform in ["ios", "chromeos", "android", "mac", "win", "linux", "fuchsia"] + f"checkout_{platform}": platform == "linux" + for platform in [ + "ios", + "chromeos", + "android", + "mac", + "win", + "linux", + "fuchsia", + ] }, **{ - f"checkout_{arch}": True - for arch in ["x64", "arm64", "arm", "x86", "mips", "mips64", "ppc", "riscv64"] + f"checkout_{arch}": True + for arch in [ + "x64", + "arm64", + "arm", + "x86", + "mips", + "mips64", + "ppc", + "riscv64", + ] }, }, "", @@ -107,7 +129,13 @@ def prefetch_all(self) -> int: ) def flatten_repr(self) -> dict: - return {"fetcher": self.fetcher, "args": {**({"hash": self.hash} if hasattr(self, "hash") else {}), **self.args}} + return { + "fetcher": self.fetcher, + "args": { + **({"hash": self.hash} if hasattr(self, "hash") else {}), + **self.args, + }, + } def flatten(self, path: str) -> dict: out = {path: self.flatten_repr()} @@ -140,7 +168,9 @@ def __init__(self, owner: str, repo: str, rev: str) -> None: } def get_file(self, filepath: str) -> str: - rev_or_tag = self.args['rev'] if 'rev' in self.args else f"refs/tags/{self.args['tag']}" + rev_or_tag = ( + self.args["rev"] if "rev" in self.args else f"refs/tags/{self.args['tag']}" + ) return ( urlopen( f"https://raw.githubusercontent.com/{self.args['owner']}/{self.args['repo']}/{rev_or_tag}/{filepath}" @@ -166,22 +196,23 @@ def __init__(self, url: str, rev: str) -> None: # 
(making it count the compressed instead of uncompressed size) # rather than complying with it. if url == "https://chromium.googlesource.com/chromium/src.git": - self.args["postFetch"] = "rm -rf $(find $out/third_party/blink/web_tests ! -name BUILD.gn -mindepth 1 -maxdepth 1); " + self.args["postFetch"] = ( + "rm -rf $(find $out/third_party/blink/web_tests ! -name BUILD.gn -mindepth 1 -maxdepth 1); " + ) self.args["postFetch"] += "rm -r $out/content/test/data; " self.args["postFetch"] += "rm -rf $out/courgette/testdata; " self.args["postFetch"] += "rm -r $out/extensions/test/data; " self.args["postFetch"] += "rm -r $out/media/test/data; " def get_file(self, filepath: str) -> str: - rev_or_tag = self.args['rev'] if 'rev' in self.args else f"refs/tags/{self.args['tag']}" + rev_or_tag = ( + self.args["rev"] if "rev" in self.args else f"refs/tags/{self.args['tag']}" + ) return base64.b64decode( - urlopen( - f"{self.args['url']}/+/{rev_or_tag}/{filepath}?format=TEXT" - ).read() + urlopen(f"{self.args['url']}/+/{rev_or_tag}/{filepath}?format=TEXT").read() ).decode("utf-8") - def repo_from_dep(dep: dict) -> Optional[Repo]: if "url" in dep: url, rev = gclient_utils.SplitUrlRevision(dep["url"]) @@ -207,16 +238,22 @@ def cli() -> None: @cli.command("eval", help="Evaluate and print the dependency tree of a gclient project") @click.argument("url", required=True, type=str) -@click.option("--root", default="src", help="Root path, where the given url is placed", type=str) +@click.option( + "--root", default="src", help="Root path, where the given url is placed", type=str +) def eval(url: str, root: str) -> None: repo = repo_from_dep({"url": url}) repo.eval() print(json.dumps(repo.flatten(root), sort_keys=True, indent=4)) -@cli.command("generate", help="Generate a dependencies description for a gclient project") +@cli.command( + "generate", help="Generate a dependencies description for a gclient project" +) @click.argument("url", required=True, type=str) -@click.option("--root", default="src", help="Root path, where the given url is placed", type=str) +@click.option( + "--root", default="src", help="Root path, where the given url is placed", type=str +) def generate(url: str, root: str) -> None: repo = repo_from_dep({"url": url}) repo.eval() diff --git a/pkgs/by-name/gi/gitlab/update.py b/pkgs/by-name/gi/gitlab/update.py index 063aa30abb58d..5f1445c1c56c5 100755 --- a/pkgs/by-name/gi/gitlab/update.py +++ b/pkgs/by-name/gi/gitlab/update.py @@ -49,6 +49,7 @@ def tags(self) -> Iterable[str]: reverse=True, ) return versions + def get_git_hash(self, rev: str): return ( subprocess.check_output( @@ -112,7 +113,9 @@ def get_data(self, rev): version=self.rev2version(rev), repo_hash=self.get_git_hash(rev), yarn_hash=self.get_yarn_hash(rev), - frontend_islands_yarn_hash=self.get_yarn_hash(rev, "/ee/frontend_islands/yarn.lock"), + frontend_islands_yarn_hash=self.get_yarn_hash( + rev, "/ee/frontend_islands/yarn.lock" + ), owner=self.owner, repo=self.repo, rev=rev, @@ -177,7 +180,12 @@ def update_rubyenv(): # update to 1.2.9 to include https://gitlab.com/gitlab-org/ruby/gems/prometheus-client-mmap/-/commit/5d77f3f3e048834250589b416c6b3d4bba65a570 subprocess.check_output( - ["sed", "-i", "s:'prometheus-client-mmap', '~> 1.2.8':'prometheus-client-mmap', '~> 1.2.9':g", "Gemfile"], + [ + "sed", + "-i", + "s:'prometheus-client-mmap', '~> 1.2.8':'prometheus-client-mmap', '~> 1.2.9':g", + "Gemfile", + ], cwd=rubyenv_dir, ) @@ -192,7 +200,12 @@ def update_rubyenv(): # [comment]: 
https://gitlab.com/gitlab-org/gitlab/-/issues/468435#note_1979750600 # [upstream issue]: https://gitlab.com/gitlab-org/gitlab/-/issues/468435 subprocess.check_output( - ["sed", "-i", "s|gem 'sidekiq', path: 'vendor/gems/sidekiq', require: 'sidekiq'|gem 'sidekiq', '~> 7.3.9'|g", "Gemfile"], + [ + "sed", + "-i", + "s|gem 'sidekiq', path: 'vendor/gems/sidekiq', require: 'sidekiq'|gem 'sidekiq', '~> 7.3.9'|g", + "Gemfile", + ], cwd=rubyenv_dir, ) @@ -243,7 +256,9 @@ def update_rubyenv(): subprocess.check_output(["rm", "-rf", "vendor", "gems"], cwd=rubyenv_dir) # Reformat gemset.nix - subprocess.check_output(["nix-shell", "--run", "treefmt pkgs/by-name/gi/gitlab"], cwd=NIXPKGS_PATH) + subprocess.check_output( + ["nix-shell", "--run", "treefmt pkgs/by-name/gi/gitlab"], cwd=NIXPKGS_PATH + ) @cli.command("update-gitaly") @@ -251,14 +266,20 @@ def update_gitaly(): """Update gitaly""" logger.info("Updating gitaly") data = _get_data_json() - gitaly_server_version = data['passthru']['GITALY_SERVER_VERSION'] + gitaly_server_version = data["passthru"]["GITALY_SERVER_VERSION"] repo = GitLabRepo(repo="gitaly") - gitaly_dir = pathlib.Path(__file__).parent / 'gitaly' + gitaly_dir = pathlib.Path(__file__).parent / "gitaly" makefile = repo.get_file("Makefile", f"v{gitaly_server_version}") makefile += "\nprint-%:;@echo $($*)\n" - git_version = subprocess.run(["make", "-f", "-", "print-GIT_VERSION"], check=True, input=makefile, text=True, capture_output=True).stdout.strip() + git_version = subprocess.run( + ["make", "-f", "-", "print-GIT_VERSION"], + check=True, + input=makefile, + text=True, + capture_output=True, + ).stdout.strip() _call_nix_update("gitaly", gitaly_server_version) _call_nix_update("gitaly.git", git_version) @@ -331,21 +352,29 @@ def update_gitlab_container_registry(rev: str, commit: bool): ) -@cli.command('update-gitlab-elasticsearch-indexer') +@cli.command("update-gitlab-elasticsearch-indexer") def update_gitlab_elasticsearch_indexer(): """Update gitlab-elasticsearch-indexer""" data = _get_data_json() - gitlab_elasticsearch_indexer_version = data['passthru']['GITLAB_ELASTICSEARCH_INDEXER_VERSION'] - _call_nix_update('gitlab-elasticsearch-indexer', gitlab_elasticsearch_indexer_version) + gitlab_elasticsearch_indexer_version = data["passthru"][ + "GITLAB_ELASTICSEARCH_INDEXER_VERSION" + ] + _call_nix_update( + "gitlab-elasticsearch-indexer", gitlab_elasticsearch_indexer_version + ) # Update the dependency gitlab-code-parser - src_workdir = subprocess.check_output( - [ - "nix-build", - "-A", - "gitlab-elasticsearch-indexer.src", - ], - cwd=NIXPKGS_PATH, - ).decode("utf-8").strip() + src_workdir = ( + subprocess.check_output( + [ + "nix-build", + "-A", + "gitlab-elasticsearch-indexer.src", + ], + cwd=NIXPKGS_PATH, + ) + .decode("utf-8") + .strip() + ) codeparser_module = json.loads( subprocess.check_output( [ @@ -353,13 +382,17 @@ def update_gitlab_elasticsearch_indexer(): "list", "-m", "-json", - "gitlab.com/gitlab-org/rust/gitlab-code-parser/bindings/go" + "gitlab.com/gitlab-org/rust/gitlab-code-parser/bindings/go", ], - cwd=src_workdir - ).decode("utf-8").strip() + cwd=src_workdir, + ) + .decode("utf-8") + .strip() ) codeparser_version = codeparser_module["Version"].replace("v", "") - _call_nix_update('gitlab-elasticsearch-indexer.codeParserBindings', codeparser_version) + _call_nix_update( + "gitlab-elasticsearch-indexer.codeParserBindings", codeparser_version + ) @cli.command("update-all") @@ -423,11 +456,7 @@ def commit_gitlab(old_version: str, new_version: str, new_rev: str) -> None: def 
commit_container_registry(old_version: str, new_version: str) -> None: """Commits the gitlab-container-registry changes for you""" subprocess.run( - [ - "git", - "add", - "pkgs/by-name/gi/gitlab-container-registry" - ], + ["git", "add", "pkgs/by-name/gi/gitlab-container-registry"], cwd=NIXPKGS_PATH, ) subprocess.run( diff --git a/pkgs/by-name/go/google-cloud-sdk/alpha__init__.py b/pkgs/by-name/go/google-cloud-sdk/alpha__init__.py index d120969d8fb8c..7eb6781ba41f2 100755 --- a/pkgs/by-name/go/google-cloud-sdk/alpha__init__.py +++ b/pkgs/by-name/go/google-cloud-sdk/alpha__init__.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Auth for the Google Cloud SDK. -""" +"""Auth for the Google Cloud SDK.""" from googlecloudsdk.calliope import base @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class Alpha(base.Group): - """Alpha versions of gcloud commands.""" + """Alpha versions of gcloud commands.""" diff --git a/pkgs/by-name/go/google-cloud-sdk/beta__init__.py b/pkgs/by-name/go/google-cloud-sdk/beta__init__.py index bb52c5a0bc4a2..0860a559f0d95 100755 --- a/pkgs/by-name/go/google-cloud-sdk/beta__init__.py +++ b/pkgs/by-name/go/google-cloud-sdk/beta__init__.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Auth for the Google Cloud SDK. -""" +"""Auth for the Google Cloud SDK.""" from googlecloudsdk.calliope import base @base.ReleaseTracks(base.ReleaseTrack.BETA) class Beta(base.Group): - """Beta versions of gcloud commands.""" + """Beta versions of gcloud commands.""" diff --git a/pkgs/by-name/hu/hunspell/update-chromium-dictionaries.py b/pkgs/by-name/hu/hunspell/update-chromium-dictionaries.py index eb24fc32937cc..6ffa377eaf935 100755 --- a/pkgs/by-name/hu/hunspell/update-chromium-dictionaries.py +++ b/pkgs/by-name/hu/hunspell/update-chromium-dictionaries.py @@ -12,19 +12,21 @@ from urllib.request import urlopen, Request -DICTIONARIES_CHROMIUM_NIX = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dictionaries-chromium.nix') +DICTIONARIES_CHROMIUM_NIX = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "dictionaries-chromium.nix" +) def get_latest_chromium_stable_release(): - RELEASES_URL = 'https://versionhistory.googleapis.com/v1/chrome/platforms/linux/channels/stable/versions/all/releases' - print(f'GET {RELEASES_URL}') + RELEASES_URL = "https://versionhistory.googleapis.com/v1/chrome/platforms/linux/channels/stable/versions/all/releases" + print(f"GET {RELEASES_URL}") with urlopen(RELEASES_URL) as resp: - return json.load(resp)['releases'][0] + return json.load(resp)["releases"][0] def get_file_revision(revision, file_path): """Fetches the requested Git revision of the given Chromium file.""" - url = f'https://chromium.googlesource.com/chromium/src/+/refs/tags/{revision}/{file_path}?format=TEXT' + url = f"https://chromium.googlesource.com/chromium/src/+/refs/tags/{revision}/{file_path}?format=TEXT" with urlopen(url) as http_response: resp = http_response.read() return base64.b64decode(resp) @@ -32,8 +34,10 @@ def get_file_revision(revision, file_path): def nix_prefetch_git(url, rev): """Prefetches the requested Git revision of the given repository URL.""" - print(f'nix-prefetch-git {url} {rev}') - out = subprocess.check_output(['nix-prefetch-git', '--quiet', '--url', url, '--rev', rev]) + print(f"nix-prefetch-git {url} {rev}") + out = subprocess.check_output( + ["nix-prefetch-git", "--quiet", "--url", url, "--rev", 
rev] + ) return json.loads(out) @@ -46,32 +50,47 @@ def get_current_revision(): sys.exit(1) -print('Getting latest chromium version...') +print("Getting latest chromium version...") chromium_release = get_latest_chromium_stable_release() -chromium_version = chromium_release['version'] -print(f'chromium version: {chromium_version}') +chromium_version = chromium_release["version"] +print(f"chromium version: {chromium_version}") -print('Getting corresponding hunspell_dictionaries commit...') -deps = get_file_revision(chromium_version, 'DEPS') +print("Getting corresponding hunspell_dictionaries commit...") +deps = get_file_revision(chromium_version, "DEPS") hunspell_dictionaries_pattern = r"^\s*Var\('chromium_git'\)\s*\+\s*'\/chromium\/deps\/hunspell_dictionaries\.git'\s*\+\s*'@'\s*\+\s*'(\w*)',$" -hunspell_dictionaries_commit = re.search(hunspell_dictionaries_pattern, deps.decode(), re.MULTILINE).group(1) -print(f'hunspell_dictionaries commit: {hunspell_dictionaries_commit}') +hunspell_dictionaries_commit = re.search( + hunspell_dictionaries_pattern, deps.decode(), re.MULTILINE +).group(1) +print(f"hunspell_dictionaries commit: {hunspell_dictionaries_commit}") current_commit = get_current_revision() if current_commit == hunspell_dictionaries_commit: - print('Commit is already packaged, no update needed.') + print("Commit is already packaged, no update needed.") sys.exit(0) -print('Commit has changed compared to the current package, updating...') +print("Commit has changed compared to the current package, updating...") -print('Getting hash of hunspell_dictionaries revision...') -hunspell_dictionaries_git = nix_prefetch_git("https://chromium.googlesource.com/chromium/deps/hunspell_dictionaries", hunspell_dictionaries_commit) -hunspell_dictionaries_hash = hunspell_dictionaries_git['hash'] -print(f'hunspell_dictionaries commit hash: {hunspell_dictionaries_hash}') +print("Getting hash of hunspell_dictionaries revision...") +hunspell_dictionaries_git = nix_prefetch_git( + "https://chromium.googlesource.com/chromium/deps/hunspell_dictionaries", + hunspell_dictionaries_commit, +) +hunspell_dictionaries_hash = hunspell_dictionaries_git["hash"] +print(f"hunspell_dictionaries commit hash: {hunspell_dictionaries_hash}") with fileinput.FileInput(DICTIONARIES_CHROMIUM_NIX, inplace=True) as file: for line in file: - result = re.sub(r'^ version = ".+";', f' version = "{chromium_version}";', line) - result = re.sub(r'^ rev = ".*";', f' rev = "{hunspell_dictionaries_commit}";', result) - result = re.sub(r'^ hash = ".+";', f' hash = "{hunspell_dictionaries_hash}";', result) - print(result, end='') + result = re.sub( + r'^ version = ".+";', f' version = "{chromium_version}";', line + ) + result = re.sub( + r'^ rev = ".*";', + f' rev = "{hunspell_dictionaries_commit}";', + result, + ) + result = re.sub( + r'^ hash = ".+";', + f' hash = "{hunspell_dictionaries_hash}";', + result, + ) + print(result, end="") diff --git a/pkgs/by-name/jo/joplin-desktop/update.py b/pkgs/by-name/jo/joplin-desktop/update.py index e1ee691c6a50f..bdc6b105fe1d3 100755 --- a/pkgs/by-name/jo/joplin-desktop/update.py +++ b/pkgs/by-name/jo/joplin-desktop/update.py @@ -7,15 +7,25 @@ from pathlib import Path -from plumbum.cmd import nix_prefetch, nix_build, yarn, chmod, yarn_berry_fetcher, prefetch_npm_deps, diff +from plumbum.cmd import ( + nix_prefetch, + nix_build, + yarn, + chmod, + yarn_berry_fetcher, + prefetch_npm_deps, + diff, +) HERE = Path(__file__).parent + def write_release(release): with HERE.joinpath("release-data.json").open("w") 
as fd: json.dump(release, fd, indent=2) fd.write("\n") + def dict_to_argstr(d): args = "{ " for key, value in d.items(): @@ -52,7 +62,7 @@ def dict_to_argstr(d): "--rev", "--expr", "null", - package + package, ]().strip() print(release["hash"]) @@ -61,9 +71,7 @@ def dict_to_argstr(d): write_release(release) src_dir = nix_build[ - "--no-out-link", - "-E", - f"((import {{}}).callPackage {package} {{}}).src" + "--no-out-link", "-E", f"((import {{}}).callPackage {package} {{}}).src" ]().strip() print(src_dir) @@ -90,21 +98,25 @@ def dict_to_argstr(d): plugin["name"] = value["cloneUrl"].split("/")[-1].removesuffix(".git") - plugin["url"] = f"{value["cloneUrl"].removesuffix('.git')}/archive/{value["commit"]}.tar.gz" + plugin["url"] = ( + f"{value['cloneUrl'].removesuffix('.git')}/archive/{value['commit']}.tar.gz" + ) plugin["hash"] = nix_prefetch.with_cwd(HERE)[ "--option", "extra-experimental-features", "flakes", - f"((import {{}}).callPackage ./buildPlugin.nix {dict_to_argstr(plugin)}).src" + f"((import {{}}).callPackage ./buildPlugin.nix {dict_to_argstr(plugin)}).src", ]().strip() plugin_src = nix_build.with_cwd(HERE)[ "--no-out-link", "-E", - f"((import {{}}).callPackage ./buildPlugin.nix {dict_to_argstr(plugin)}).src" + f"((import {{}}).callPackage ./buildPlugin.nix {dict_to_argstr(plugin)}).src", ]().strip() - plugin["npmDepsHash"] = prefetch_npm_deps(Path(plugin_src).joinpath("package-lock.json")).strip() + plugin["npmDepsHash"] = prefetch_npm_deps( + Path(plugin_src).joinpath("package-lock.json") + ).strip() release["plugins"][key] = plugin @@ -115,19 +127,14 @@ def dict_to_argstr(d): missing_hashes = HERE.joinpath("missing-hashes.json") with missing_hashes.open("w") as fd: - new_missing_hashes = yarn_berry_fetcher[ - "missing-hashes", - yarn_lock - ]() + new_missing_hashes = yarn_berry_fetcher["missing-hashes", yarn_lock]() fd.write(new_missing_hashes) print("prefetching offline cache...") release["deps_hash"] = yarn_berry_fetcher[ - "prefetch", - yarn_lock, - missing_hashes + "prefetch", yarn_lock, missing_hashes ]().strip() diff --git a/pkgs/by-name/lu/luarocks-packages-updater/updater.py b/pkgs/by-name/lu/luarocks-packages-updater/updater.py index 555cde43ec555..92500aace13db 100755 --- a/pkgs/by-name/lu/luarocks-packages-updater/updater.py +++ b/pkgs/by-name/lu/luarocks-packages-updater/updater.py @@ -119,7 +119,7 @@ def generate_nix(self, results: list[tuple[LuaPlugin, str]], outfilename: str): with tempfile.NamedTemporaryFile("w+") as f: f.write(HEADER) header2 = textwrap.dedent( - """ + """ { stdenv, lib, diff --git a/pkgs/by-name/ma/maven/maven-proxy.py b/pkgs/by-name/ma/maven/maven-proxy.py index 7be8f1e3a7050..d8d8bb765ca11 100644 --- a/pkgs/by-name/ma/maven/maven-proxy.py +++ b/pkgs/by-name/ma/maven/maven-proxy.py @@ -20,11 +20,11 @@ def parse_proxy_url(url): return None return { - 'protocol': parsed.scheme or 'http', - 'host': parsed.hostname, - 'port': parsed.port or (443 if parsed.scheme == 'https' else 80), - 'username': parsed.username, - 'password': parsed.password + "protocol": parsed.scheme or "http", + "host": parsed.hostname, + "port": parsed.port or (443 if parsed.scheme == "https" else 80), + "username": parsed.username, + "password": parsed.password, } @@ -42,9 +42,9 @@ def format_proxy_block(proxy, id_suffix, non_proxy_hosts): return f""" {id_suffix}-proxy true - {proxy['protocol']} - {proxy['host']} - {proxy['port']} + {proxy["protocol"]} + {proxy["host"]} + {proxy["port"]} {auth}{np_hosts} """ @@ -56,13 +56,9 @@ def main(output_path): proxy_blocks = [] if 
http_proxy: - proxy_blocks.append( - format_proxy_block(http_proxy, "http", non_proxy_hosts) - ) + proxy_blocks.append(format_proxy_block(http_proxy, "http", non_proxy_hosts)) if https_proxy and https_proxy != http_proxy: - proxy_blocks.append( - format_proxy_block(https_proxy, "https", non_proxy_hosts) - ) + proxy_blocks.append(format_proxy_block(https_proxy, "https", non_proxy_hosts)) settings_xml = f""" -{'\n'.join(proxy_blocks)} +{"\n".join(proxy_blocks)} """ diff --git a/pkgs/by-name/ne/nextcloud-talk-desktop/update.py b/pkgs/by-name/ne/nextcloud-talk-desktop/update.py index 21027bfc551db..83d5321d44dd9 100755 --- a/pkgs/by-name/ne/nextcloud-talk-desktop/update.py +++ b/pkgs/by-name/ne/nextcloud-talk-desktop/update.py @@ -11,29 +11,35 @@ # Now get the hash for Darwin. # (It's the same for both Darwin platforms, and we don't support aarch64-linux). newVer = subprocess.run( - ["nix-instantiate", "--eval", "--raw", "-A", "nextcloud-talk-desktop.version"], capture_output=True, encoding="locale" + ["nix-instantiate", "--eval", "--raw", "-A", "nextcloud-talk-desktop.version"], + capture_output=True, + encoding="locale", ).stdout -darwinUrl = ( - f"https://github.com/nextcloud-releases/talk-desktop/releases/download/v{newVer}/Nextcloud.Talk-macos-universal.dmg" -) +darwinUrl = f"https://github.com/nextcloud-releases/talk-desktop/releases/download/v{newVer}/Nextcloud.Talk-macos-universal.dmg" oldDarwinHash = subprocess.run( - ["nix-instantiate", "--eval", "--raw", "-A", f"nextcloud-talk-desktop.passthru.hashes.darwin"], - capture_output=True, - encoding="locale", + [ + "nix-instantiate", + "--eval", + "--raw", + "-A", + f"nextcloud-talk-desktop.passthru.hashes.darwin", + ], + capture_output=True, + encoding="locale", ).stdout newDarwinHash = subprocess.run( - ["bash", "-c", f"nix store prefetch-file {darwinUrl} --json | jq -r '.hash'"], - capture_output=True, - encoding="locale", + ["bash", "-c", f"nix store prefetch-file {darwinUrl} --json | jq -r '.hash'"], + capture_output=True, + encoding="locale", ).stdout.strip() # Has a newline with open(join(dirname(__file__), "package.nix"), "r") as f: - txt = f.read() + txt = f.read() txt = txt.replace(oldDarwinHash, newDarwinHash) with open(join(dirname(__file__), "package.nix"), "w") as f: - f.write(txt) + f.write(txt) diff --git a/pkgs/by-name/ni/nix-required-mounts/nix_required_mounts.py b/pkgs/by-name/ni/nix-required-mounts/nix_required_mounts.py index 448c9a1e46aba..c5781ae0149d9 100644 --- a/pkgs/by-name/ni/nix-required-mounts/nix_required_mounts.py +++ b/pkgs/by-name/ni/nix-required-mounts/nix_required_mounts.py @@ -86,9 +86,9 @@ def validate_mounts(pattern: Pattern) -> List[Tuple[PathString, PathString, bool roots.extend((m, m, pattern["unsafeFollowSymlinks"]) for m in matches) else: assert isinstance(mount, dict) and "host" in mount, mount - assert Path( - mount["host"] - ).exists(), f"Specified host paths do not exist: {mount['host']}" + assert Path(mount["host"]).exists(), ( + f"Specified host paths do not exist: {mount['host']}" + ) roots.append( ( mount["guest"], diff --git a/pkgs/by-name/ni/nixos-render-docs-redirects/src/nixos_render_docs_redirects/__init__.py b/pkgs/by-name/ni/nixos-render-docs-redirects/src/nixos_render_docs_redirects/__init__.py index d09849ac48669..6d299be2ecf32 100644 --- a/pkgs/by-name/ni/nixos-render-docs-redirects/src/nixos_render_docs_redirects/__init__.py +++ b/pkgs/by-name/ni/nixos-render-docs-redirects/src/nixos_render_docs_redirects/__init__.py @@ -4,7 +4,9 @@ from pathlib 
import Path -def add_content(redirects: dict[str, list[str]], identifier: str, path: str) -> dict[str, list[str]]: +def add_content( + redirects: dict[str, list[str]], identifier: str, path: str +) -> dict[str, list[str]]: if identifier in redirects: raise IdentifierExists(identifier) @@ -20,7 +22,9 @@ def add_content(redirects: dict[str, list[str]], identifier: str, path: str) -> return dict(new_redirects) -def move_content(redirects: dict[str, list[str]], identifier: str, path: str) -> dict[str, list[str]]: +def move_content( + redirects: dict[str, list[str]], identifier: str, path: str +) -> dict[str, list[str]]: if identifier not in redirects: raise IdentifierNotFound(identifier) redirects[identifier].insert(0, f"{path}#{identifier}") @@ -28,9 +32,7 @@ def move_content(redirects: dict[str, list[str]], identifier: str, path: str) -> def rename_identifier( - redirects: dict[str, list[str]], - old_identifier: str, - new_identifier: str + redirects: dict[str, list[str]], old_identifier: str, new_identifier: str ) -> dict[str, list[str]]: if old_identifier not in redirects: raise IdentifierNotFound(old_identifier) @@ -44,7 +46,7 @@ def rename_identifier( for key, value in redirects.items(): if key == old_identifier: new_redirects[new_identifier] = value - current_path = value[0].split('#')[0] + current_path = value[0].split("#")[0] continue new_redirects[key] = value new_redirects[new_identifier].insert(0, f"{current_path}#{new_identifier}") @@ -52,9 +54,7 @@ def rename_identifier( def remove_and_redirect( - redirects: dict[str, list[str]], - old_identifier: str, - new_identifier: str + redirects: dict[str, list[str]], old_identifier: str, new_identifier: str ) -> dict[str, list[str]]: if old_identifier not in redirects: raise IdentifierNotFound(old_identifier) @@ -65,7 +65,9 @@ def remove_and_redirect( def main(): - parser = argparse.ArgumentParser(description="redirects manipulation for nixos manuals") + parser = argparse.ArgumentParser( + description="redirects manipulation for nixos manuals" + ) commands = parser.add_subparsers(dest="command", required=True) parser.add_argument("-f", "--file", type=Path, required=True) @@ -100,12 +102,20 @@ def main(): print(f"Moved '{args.identifier}' to the new path: {args.path}") elif args.command == "rename-identifier": - redirects = rename_identifier(redirects, args.old_identifier, args.new_identifier) - print(f"Renamed identifier from {args.old_identifier} to {args.new_identifier}") + redirects = rename_identifier( + redirects, args.old_identifier, args.new_identifier + ) + print( + f"Renamed identifier from {args.old_identifier} to {args.new_identifier}" + ) elif args.command == "remove-and-redirect": - redirects = remove_and_redirect(redirects, args.identifier, args.target_identifier) - print(f"Redirect from '{args.identifier}' to '{args.target_identifier}' added.") + redirects = remove_and_redirect( + redirects, args.identifier, args.target_identifier + ) + print( + f"Redirect from '{args.identifier}' to '{args.target_identifier}' added." 
+ ) except Exception as error: print(error, file=sys.stderr) else: diff --git a/pkgs/by-name/ni/nixos-render-docs-redirects/src/tests/test_redirects.py b/pkgs/by-name/ni/nixos-render-docs-redirects/src/tests/test_redirects.py index ae648d5e26c9a..e46f4f5bce9d0 100644 --- a/pkgs/by-name/ni/nixos-render-docs-redirects/src/tests/test_redirects.py +++ b/pkgs/by-name/ni/nixos-render-docs-redirects/src/tests/test_redirects.py @@ -27,7 +27,6 @@ def test_add_content(self): with self.assertRaises(IdentifierExists): add_content(result, "foo", "another/path.html") - def test_move_content(self): initial_redirects = { "foo": ["path/to/foo.html#foo"], @@ -44,7 +43,6 @@ def test_move_content(self): with self.assertRaises(IdentifierNotFound): move_content(result, "baz", "path.html") - def test_rename_identifier(self): initial_redirects = { "foo": ["path/to/foo.html#foo"], @@ -65,7 +63,6 @@ def test_rename_identifier(self): with self.assertRaises(IdentifierExists): rename_identifier(result, "boo", "boo") - def test_remove_and_redirect(self): initial_redirects = { "foo": ["new/path.html#foo", "path/to/foo.html#foo"], @@ -73,7 +70,11 @@ def test_remove_and_redirect(self): "baz": ["path/to/baz.html#baz"], } final_redirects = { - "bar": ["path/to/bar.html#bar", "new/path.html#foo", "path/to/foo.html#foo"], + "bar": [ + "path/to/bar.html#bar", + "new/path.html#foo", + "path/to/foo.html#foo", + ], "baz": ["path/to/baz.html#baz"], } diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/__init__.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/__init__.py index 19bf705795a93..86b667dcbe321 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/__init__.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/__init__.py @@ -9,6 +9,7 @@ from . import options from . 
import parallel + def pretty_print_exc(e: BaseException, *, _desc_text: str = "error") -> None: print(f"\x1b[1;31m{_desc_text}:\x1b[0m", file=sys.stderr) # destructure Exception and RuntimeError specifically so we can show nice @@ -31,24 +32,25 @@ def pretty_print_exc(e: BaseException, *, _desc_text: str = "error") -> None: print("", file=sys.stderr) pretty_print_exc(e.__cause__, _desc_text="caused by") + def main() -> None: - parser = argparse.ArgumentParser(description='render nixos manual bits') - parser.add_argument('-j', '--jobs', type=int, default=None) + parser = argparse.ArgumentParser(description="render nixos manual bits") + parser.add_argument("-j", "--jobs", type=int, default=None) - commands = parser.add_subparsers(dest='command', required=True) + commands = parser.add_subparsers(dest="command", required=True) - options.build_cli(commands.add_parser('options')) - manual.build_cli(commands.add_parser('manual')) + options.build_cli(commands.add_parser("options")) + manual.build_cli(commands.add_parser("manual")) args = parser.parse_args() try: parallel.pool_processes = args.jobs - if args.command == 'options': + if args.command == "options": options.run_cli(args) - elif args.command == 'manual': + elif args.command == "manual": manual.run_cli(args) else: - raise RuntimeError('command not hooked up', args) + raise RuntimeError("command not hooked up", args) except Exception as e: traceback.print_exc() pretty_print_exc(e) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/asciidoc.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/asciidoc.py index dadcaa96b4673..2f269c25dedd8 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/asciidoc.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/asciidoc.py @@ -9,47 +9,52 @@ _asciidoc_escapes = { # escape all dots, just in case one is pasted at SOL - ord('.'): "{zwsp}.", + ord("."): "{zwsp}.", # may be replaced by typographic variants ord("'"): "{apos}", ord('"'): "{quot}", # passthrough character - ord('+'): "{plus}", + ord("+"): "{plus}", # table marker - ord('|'): "{vbar}", + ord("|"): "{vbar}", # xml entity reference - ord('&'): "{amp}", + ord("&"): "{amp}", # crossrefs. 
< needs extra escaping because links break in odd ways if they start with it - ord('<'): "{zwsp}+<+{zwsp}", - ord('>'): "{gt}", + ord("<"): "{zwsp}+<+{zwsp}", + ord(">"): "{gt}", # anchors, links, block attributes - ord('['): "{startsb}", - ord(']'): "{endsb}", + ord("["): "{startsb}", + ord("]"): "{endsb}", # superscript, subscript - ord('^'): "{caret}", - ord('~'): "{tilde}", + ord("^"): "{caret}", + ord("~"): "{tilde}", # bold - ord('*'): "{asterisk}", + ord("*"): "{asterisk}", # backslash - ord('\\'): "{backslash}", + ord("\\"): "{backslash}", # inline code - ord('`'): "{backtick}", + ord("`"): "{backtick}", } + + def asciidoc_escape(s: str) -> str: s = s.translate(_asciidoc_escapes) # :: is deflist item, ;; is has a replacement but no idea why return s.replace("::", "{two-colons}").replace(";;", "{two-semicolons}") + @dataclass(kw_only=True) class List: head: str + @dataclass() class Par: sep: str block_delim: str continuing: bool = False + class AsciiDocRenderer(Renderer): __output__ = "asciidoc" @@ -59,16 +64,22 @@ class AsciiDocRenderer(Renderer): def __init__(self, manpage_urls: Mapping[str, str]): super().__init__(manpage_urls) - self._parstack = [ Par("\n\n", "====") ] + self._parstack = [Par("\n\n", "====")] self._list_stack = [] self._attrspans = [] def _enter_block(self, is_list: bool) -> None: - self._parstack.append(Par("\n+\n" if is_list else "\n\n", self._parstack[-1].block_delim + "=")) + self._parstack.append( + Par("\n+\n" if is_list else "\n\n", self._parstack[-1].block_delim + "=") + ) + def _leave_block(self) -> None: self._parstack.pop() + def _break(self, force: bool = False) -> str: - result = self._parstack[-1].sep if force or self._parstack[-1].continuing else "" + result = ( + self._parstack[-1].sep if force or self._parstack[-1].continuing else "" + ) self._parstack[-1].continuing = True return result @@ -76,20 +87,22 @@ def _admonition_open(self, kind: str) -> str: pbreak = self._break() self._enter_block(False) return f"{pbreak}[{kind}]\n{self._parstack[-2].block_delim}\n" + def _admonition_close(self) -> str: self._leave_block() return f"\n{self._parstack[-1].block_delim}\n" def _list_open(self, token: Token, head: str) -> str: attrs = [] - if (idx := token.attrs.get('start')) is not None: + if (idx := token.attrs.get("start")) is not None: attrs.append(f"start={idx}") - if token.meta['compact']: + if token.meta["compact"]: attrs.append('options="compact"') if self._list_stack: head *= len(self._list_stack[0].head) + 1 self._list_stack.append(List(head=head)) return f"{self._break()}[{','.join(attrs)}]" + def _list_close(self) -> str: self._list_stack.pop() return "" @@ -97,107 +110,147 @@ def _list_close(self) -> str: def text(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True return asciidoc_escape(token.content) + def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._break() + def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: return " +\n" + def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: return " " + def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True return f"``{asciidoc_escape(token.content)}``" + def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self.fence(token, tokens, i) + def link_open(self, token: Token, tokens: 
Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True return f"link:{quote(cast(str, token.attrs['href']), safe='/:')}[" + def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "]" + def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._enter_block(True) # allow the next token to be a block or an inline. - return f'\n{self._list_stack[-1].head} {{empty}}' + return f"\n{self._list_stack[-1].head} {{empty}}" + def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return "\n" + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - return self._list_open(token, '*') + return self._list_open(token, "*") + def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._list_close() + def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "__" + def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "__" + def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "**" + def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "**" + def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str: attrs = f"[source,{token.info}]\n" if token.info else "" code = token.content - if code.endswith('\n'): + if code.endswith("\n"): code = code[:-1] return f"{self._break(True)}{attrs}----\n{code}\n----" + def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: pbreak = self._break(True) self._enter_block(False) return f"{pbreak}[quote]\n{self._parstack[-2].block_delim}\n" + def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return f"\n{self._parstack[-1].block_delim}" + def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("NOTE") + def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("CAUTION") + def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("IMPORTANT") + def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("TIP") + def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("WARNING") + def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return f"{self._break()}[]" + def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._break() + def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._enter_block(True) return ":: {empty}" + def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + def dd_close(self, token: Token, tokens: Sequence[Token], i: 
int) -> str: self._leave_block() return "\n" + def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str: # NixOS-specific roles are documented at /doc/README.md (with reverse reference) self._parstack[-1].continuing = True content = asciidoc_escape(token.content) - if token.meta['name'] == 'manpage' and (url := self._manpage_urls.get(token.content)): + if token.meta["name"] == "manpage" and ( + url := self._manpage_urls.get(token.content) + ): return f"link:{quote(url, safe='/:')}[{content}]" return f"[.{token.meta['name']}]``{asciidoc_escape(token.content)}``" + def inline_anchor(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True return f"[[{token.attrs['id']}]]" + def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True (id_part, class_part) = ("", "") - if id := token.attrs.get('id'): + if id := token.attrs.get("id"): id_part = f"[[{id}]]" - if s := token.attrs.get('class'): - if s == 'keycap': + if s := token.attrs.get("class"): + if s == "keycap": class_part = "kbd:[" self._attrspans.append("]") else: @@ -205,13 +258,18 @@ def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str: else: self._attrspans.append("") return id_part + class_part + def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._attrspans.pop() + def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return token.markup.replace("#", "=") + " " + def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "\n" + def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - return self._list_open(token, '.') + return self._list_open(token, ".") + def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._list_close() diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py index 7f31d0be44ae9..18f20a11f1a33 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/commonmark.py @@ -6,17 +6,20 @@ from markdown_it.token import Token + @dataclass(kw_only=True) class List: next_idx: Optional[int] = None compact: bool first_item_seen: bool = False + @dataclass class Par: indent: str continuing: bool = False + class CommonMarkRenderer(Renderer): __output__ = "commonmark" @@ -26,20 +29,27 @@ class CommonMarkRenderer(Renderer): def __init__(self, manpage_urls: Mapping[str, str]): super().__init__(manpage_urls) - self._parstack = [ Par("") ] + self._parstack = [Par("")] self._link_stack = [] self._list_stack = [] def _enter_block(self, extra_indent: str) -> None: self._parstack.append(Par(self._parstack[-1].indent + extra_indent)) + def _leave_block(self) -> None: self._parstack.pop() self._parstack[-1].continuing = True + def _break(self) -> str: self._parstack[-1].continuing = True return f"\n{self._parstack[-1].indent}" + def _maybe_parbreak(self) -> str: - result = f"\n{self._parstack[-1].indent}" * 2 if self._parstack[-1].continuing else "" + result = ( + f"\n{self._parstack[-1].indent}" * 2 + if self._parstack[-1].continuing + else "" + ) self._parstack[-1].continuing = True return result @@ -47,145 +57,198 @@ def _admonition_open(self, kind: str) -> str: pbreak = self._maybe_parbreak() self._enter_block("") return f"{pbreak}**{kind}:** " + def 
_admonition_close(self) -> str: self._leave_block() return "" def _indent_raw(self, s: str) -> str: - if '\n' not in s: + if "\n" not in s: return s return f"\n{self._parstack[-1].indent}".join(s.splitlines()) def text(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True return self._indent_raw(md_escape(token.content)) + def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._maybe_parbreak() + def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: return f" {self._break()}" + def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._break() + def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True return md_make_code(token.content) + def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self.fence(token, tokens, i) + def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True - self._link_stack.append(cast(str, token.attrs['href'])) + self._link_stack.append(cast(str, token.attrs["href"])) return "[" + def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return f"]({md_escape(self._link_stack.pop())})" + def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: lst = self._list_stack[-1] - lbreak = "" if not lst.first_item_seen else self._break() * (1 if lst.compact else 2) + lbreak = ( + "" if not lst.first_item_seen else self._break() * (1 if lst.compact else 2) + ) lst.first_item_seen = True head = " -" if lst.next_idx is not None: head = f" {lst.next_idx}." 
lst.next_idx += 1 self._enter_block(" " * (len(head) + 1)) - return f'{lbreak}{head} ' + return f"{lbreak}{head} " + def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return "" + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - self._list_stack.append(List(compact=bool(token.meta['compact']))) + self._list_stack.append(List(compact=bool(token.meta["compact"]))) return self._maybe_parbreak() + def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._list_stack.pop() return "" + def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "*" + def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "*" + def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "**" + def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "**" + def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str: code = token.content - if code.endswith('\n'): + if code.endswith("\n"): code = code[:-1] pbreak = self._maybe_parbreak() - return pbreak + self._indent_raw(md_make_code(code, info=token.info, multiline=True)) + return pbreak + self._indent_raw( + md_make_code(code, info=token.info, multiline=True) + ) + def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: pbreak = self._maybe_parbreak() self._enter_block("> ") return pbreak + "> " + def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return "" + def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("Note") + def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("Caution") + def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("Important") + def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("Tip") + def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("Warning") + def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._list_stack.append(List(compact=False)) return "" + def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._list_stack.pop() return "" + def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: pbreak = self._maybe_parbreak() self._enter_block(" ") # add an opening zero-width non-joiner to separate *our* emphasis from possible # emphasis in the provided term - return f'{pbreak} - *{chr(0x200C)}' + return f"{pbreak} - *{chr(0x200C)}" + def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return f"{chr(0x200C)}*" + def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._parstack[-1].continuing = True return "" + def 
dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return "" + def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str: # NixOS-specific roles are documented at /doc/README.md (with reverse reference) self._parstack[-1].continuing = True content = md_make_code(token.content) - if token.meta['name'] == 'manpage' and (url := self._manpage_urls.get(token.content)): + if token.meta["name"] == "manpage" and ( + url := self._manpage_urls.get(token.content) + ): return f"[{content}]({url})" - return content # no roles in regular commonmark + return content # no roles in regular commonmark + def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str: # there's no way we can emit attrspans correctly in all cases. we could use inline # html for ids, but that would not round-trip. same holds for classes. since this # renderer is only used for approximate options export and all of these things are # not allowed in options we can ignore them for now. return "" + def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return token.markup + " " + def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "\n" + def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._list_stack.append( - List(next_idx = cast(int, token.attrs.get('start', 1)), - compact = bool(token.meta['compact']))) + List( + next_idx=cast(int, token.attrs.get("start", 1)), + compact=bool(token.meta["compact"]), + ) + ) return self._maybe_parbreak() + def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._list_stack.pop() return "" + def image(self, token: Token, tokens: Sequence[Token], i: int) -> str: - if title := cast(str, token.attrs.get('title', '')): + if title := cast(str, token.attrs.get("title", "")): title = ' "' + title.replace('"', '\\"') + '"' - return f'![{token.content}]({token.attrs["src"]}{title})' + return f"![{token.content}]({token.attrs['src']}{title})" diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/html.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/html.py index 3e4ff1aedb7a8..16e9e022d95db 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/html.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/html.py @@ -7,9 +7,11 @@ from .manual_structure import XrefTarget from .md import Renderer + class UnresolvedXrefError(Exception): pass + class Heading(NamedTuple): container_tag: str level: int @@ -21,8 +23,10 @@ class Heading(NamedTuple): # after the heading titlepage (and maybe partinfo) has been closed. 
 toc_fragment: str -_bullet_list_styles = [ 'disc', 'circle', 'square' ] -_ordered_list_styles = [ '1', 'a', 'i', 'A', 'I' ] + +_bullet_list_styles = ["disc", "circle", "square"] +_ordered_list_styles = ["1", "a", "i", "A", "I"] + class HTMLRenderer(Renderer): _xref_targets: Mapping[str, XrefTarget] @@ -33,7 +37,9 @@ class HTMLRenderer(Renderer): _bullet_list_nesting: int = 0 _ordered_list_nesting: int = 0 - def __init__(self, manpage_urls: Mapping[str, str], xref_targets: Mapping[str, XrefTarget]): + def __init__( + self, manpage_urls: Mapping[str, str], xref_targets: Mapping[str, XrefTarget] + ): super().__init__(manpage_urls) self._headings = [] self._attrspans = [] @@ -49,106 +55,146 @@ def _pull_image(self, path: str) -> str: def text(self, token: Token, tokens: Sequence[Token], i: int) -> str: return escape(token.content)
[Remainder of the pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/html.py hunks omitted: the HTML string literals returned by the renderer methods (paragraph, list, admonition, definition-list, heading, table, figure, and footnote markup) did not survive extraction. The changes in that file are the same mechanical ruff-format updates seen throughout this patch: single quotes replaced by double quotes, long calls and f-strings wrapped across lines, and blank lines inserted between methods.]
    " return "" def _close_headings(self, level: Optional[int]) -> str: result = [] - while len(self._headings) and (level is None or self._headings[-1].level >= level): + while len(self._headings) and ( + level is None or self._headings[-1].level >= level + ): result.append(self._maybe_close_partintro()) result.append("") self._headings.pop() @@ -349,5 +419,6 @@ def _close_headings(self, level: Optional[int]) -> str: def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "section" + def _build_toc(self, tokens: Sequence[Token], i: int) -> str: return "" diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py index a3d6e791cabdf..9112a4f80b052 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manpage.py @@ -25,33 +25,42 @@ # turn into a typographic hyphen), and . (roff request marker at SOL, changes spacing semantics # at EOL). groff additionally does not allow unicode escapes for codepoints below U+0080, so # those need "proper" roff escapes/replacements instead. -_roff_unicode = re.compile(r'''[^\n !#$%&()*+,\-./0-9:;<=>?@A-Z[\\\]_a-z{|}]''', re.ASCII) +_roff_unicode = re.compile( + r"""[^\n !#$%&()*+,\-./0-9:;<=>?@A-Z[\\\]_a-z{|}]""", re.ASCII +) _roff_escapes = { ord('"'): "\\(dq", ord("'"): "\\(aq", - ord('-'): "\\-", - ord('.'): "\\&.", - ord('\\'): "\\e", - ord('^'): "\\(ha", - ord('`'): "\\(ga", - ord('~'): "\\(ti", + ord("-"): "\\-", + ord("."): "\\&.", + ord("\\"): "\\e", + ord("^"): "\\(ha", + ord("`"): "\\(ga", + ord("~"): "\\(ti", } + + def man_escape(s: str) -> str: s = s.translate(_roff_escapes) return _roff_unicode.sub(lambda m: f"\\[u{ord(m[0]):04X}]", s) + # remove leading and trailing spaces from links and condense multiple consecutive spaces # into a single space for presentation parity with html. this is currently easiest with # regex postprocessing and some marker characters. since we don't want to drop spaces # from code blocks we will have to specially protect *inline* code (luckily not block code) # so normalization can turn the spaces inside it into regular spaces again. -_normalize_space_re = re.compile(r'''\u0000 < *| *>\u0000 |(?<= ) +''') +_normalize_space_re = re.compile(r"""\u0000 < *| *>\u0000 |(?<= ) +""") + + def _normalize_space(s: str) -> str: return _normalize_space_re.sub("", s).replace("\0p", " ") + def _protect_spaces(s: str) -> str: return s.replace(" ", "\0p") + @dataclass(kw_only=True) class List: width: int @@ -59,6 +68,7 @@ class List: compact: bool first_item_seen: bool = False + # this renderer assumed that it produces a set of lines as output, and that those lines will # be pasted as-is into a larger output. no prefixing or suffixing is allowed for correctness. 
# @@ -95,15 +105,18 @@ def __init__(self, manpage_urls: Mapping[str, str], href_targets: dict[str, str] self._font_stack = [] def _join_block(self, ls: Iterable[str]) -> str: - return "\n".join([ l for l in ls if len(l) ]) + return "\n".join([l for l in ls if len(l)]) + def _join_inline(self, ls: Iterable[str]) -> str: return _normalize_space(super()._join_inline(ls)) def _enter_block(self) -> None: self._do_parbreak_stack.append(False) + def _leave_block(self) -> None: self._do_parbreak_stack.pop() self._do_parbreak_stack[-1] = True + def _maybe_parbreak(self, suffix: str = "") -> str: result = f".sp{suffix}" if self._do_parbreak_stack[-1] else "" self._do_parbreak_stack[-1] = True @@ -111,45 +124,49 @@ def _maybe_parbreak(self, suffix: str = "") -> str: def _admonition_open(self, kind: str) -> str: self._enter_block() - return ( - '.sp\n' - '.RS 4\n' - f'\\fB{kind}\\fP\n' - '.br' - ) + return f".sp\n.RS 4\n\\fB{kind}\\fP\n.br" + def _admonition_close(self) -> str: self._leave_block() return ".RE" def render(self, tokens: Sequence[Token]) -> str: - self._do_parbreak_stack = [ False ] - self._font_stack = [ "\\fR" ] + self._do_parbreak_stack = [False] + self._font_stack = ["\\fR"] return super().render(tokens) def text(self, token: Token, tokens: Sequence[Token], i: int) -> str: return man_escape(token.content) + def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._maybe_parbreak() + def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: return ".br" + def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: return " " + def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str: s = _protect_spaces(man_escape(token.content)) return f"\\fR\\(oq{s}\\(cq\\fP" if self.inline_code_is_quoted else s + def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self.fence(token, tokens, i) + def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - href = cast(str, token.attrs['href']) + href = cast(str, token.attrs["href"]) self._link_stack.append(href) text = "" - if tokens[i + 1].type == 'link_close' and href in self._href_targets: + if tokens[i + 1].type == "link_close" and href in self._href_targets: # TODO error or warning if the target can't be resolved text = self._href_targets[href] self._font_stack.append("\\fB") return f"\\fB{text}\0 <" + def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: href = self._link_stack.pop() text = "" @@ -162,127 +179,153 @@ def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: text = "\\fR" + man_escape(f"[{idx}]") self._font_stack.pop() return f">\0 {text}{self._font_stack[-1]}" + def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._enter_block() lst = self._list_stack[-1] - maybe_space = '' if lst.compact or not lst.first_item_seen else '.sp\n' + maybe_space = "" if lst.compact or not lst.first_item_seen else ".sp\n" lst.first_item_seen = True head = "•" if lst.next_idx is not None: head = f"{lst.next_idx}." 
lst.next_idx += 1 return ( - f'{maybe_space}' - f'.RS {lst.width}\n' + f"{maybe_space}" + f".RS {lst.width}\n" f"\\h'-{len(head) + 1}'\\fB{man_escape(head)}\\fP\\h'1'\\c" ) + def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return ".RE" + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - self._list_stack.append(List(width=4, compact=bool(token.meta['compact']))) + self._list_stack.append(List(width=4, compact=bool(token.meta["compact"]))) return self._maybe_parbreak() + def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._list_stack.pop() return "" + def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._font_stack.append("\\fI") return "\\fI" + def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._font_stack.pop() return self._font_stack[-1] + def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._font_stack.append("\\fB") return "\\fB" + def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._font_stack.pop() return self._font_stack[-1] + def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str: - s = man_escape(token.content).rstrip('\n') - return ( - '.sp\n' - '.RS 4\n' - '.nf\n' - f'{s}\n' - '.fi\n' - '.RE' - ) + s = man_escape(token.content).rstrip("\n") + return f".sp\n.RS 4\n.nf\n{s}\n.fi\n.RE" + def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: maybe_par = self._maybe_parbreak("\n") self._enter_block() - return ( - f"{maybe_par}" - ".RS 4\n" - f"\\h'-3'\\fI\\(lq\\(rq\\fP\\h'1'\\c" - ) + return f"{maybe_par}.RS 4\n\\h'-3'\\fI\\(lq\\(rq\\fP\\h'1'\\c" + def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return ".RE" + def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_open("Note") + def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - return self._admonition_open( "Caution") + return self._admonition_open("Caution") + def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - return self._admonition_open( "Important") + return self._admonition_open("Important") + def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - return self._admonition_open( "Tip") + return self._admonition_open("Tip") + def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - return self._admonition_open( "Warning") + return self._admonition_open("Warning") + def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return self._admonition_close() + def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return ".RS 4" + def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return ".RE" + def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: return ".PP" + def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + 
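One pattern in this renderer that is easy to miss: em_open/em_close and strong_open/strong_close above keep a stack of roff font escapes, and closing a span re-emits the enclosing font rather than resetting to roman, so emphasis nested inside strong text falls back to \fB instead of \fR. A tiny standalone sketch of that discipline (illustrative only, not part of the patch):

font_stack = ["\\fR"]  # roman is the outermost font

def open_font(font: str) -> str:
    font_stack.append(font)
    return font

def close_font() -> str:
    font_stack.pop()
    return font_stack[-1]  # restore whatever font encloses us

parts = [
    open_font("\\fB"), "bold ",          # strong_open
    open_font("\\fI"), "bold italic",    # em_open
    close_font(), " bold again",         # em_close -> back to \fB
    close_font(),                        # strong_close -> back to \fR
]
print("".join(parts))  # \fBbold \fIbold italic\fB bold again\fR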
def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._enter_block() return ".RS 4" + def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._leave_block() return ".RE" + def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str: # NixOS-specific roles are documented at /doc/README.md (with reverse reference) - if token.meta['name'] in [ 'command', 'env', 'option' ]: - return f'\\fB{man_escape(token.content)}\\fP' - elif token.meta['name'] in [ 'file', 'var' ]: - return f'\\fI{man_escape(token.content)}\\fP' - elif token.meta['name'] == 'manpage': - [page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ] + if token.meta["name"] in ["command", "env", "option"]: + return f"\\fB{man_escape(token.content)}\\fP" + elif token.meta["name"] in ["file", "var"]: + return f"\\fI{man_escape(token.content)}\\fP" + elif token.meta["name"] == "manpage": + [page, section] = [s.strip() for s in token.content.rsplit("(", 1)] section = section[:-1] - return f'\\fB{man_escape(page)}\\fP\\fR({man_escape(section)})\\fP' + return f"\\fB{man_escape(page)}\\fP\\fR({man_escape(section)})\\fP" else: raise NotImplementedError("md node not supported yet", token) + def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str: # mdoc knows no anchors so we can drop those, but classes must be rejected. - if 'class' in token.attrs: + if "class" in token.attrs: return super().attr_span_begin(token, tokens, i) return "" + def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str: return "" + def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported in manpages", token) + def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported in manpages", token) + def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: # max item head width for a number, a dot, and one leading space and one trailing space - width = 3 + len(str(cast(int, token.meta['end']))) + width = 3 + len(str(cast(int, token.meta["end"]))) self._list_stack.append( - List(width = width, - next_idx = cast(int, token.attrs.get('start', 1)), - compact = bool(token.meta['compact']))) + List( + width=width, + next_idx=cast(int, token.attrs.get("start", 1)), + compact=bool(token.meta["compact"]), + ) + ) return self._maybe_parbreak() + def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: self._list_stack.pop() return "" diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual.py index 7dbb78e075880..5c71542c52b78 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual.py @@ -14,11 +14,20 @@ from . import md, options from .html import HTMLRenderer, UnresolvedXrefError -from .manual_structure import check_structure, FragmentType, is_include, make_xml_id, TocEntry, TocEntryType, XrefTarget +from .manual_structure import ( + check_structure, + FragmentType, + is_include, + make_xml_id, + TocEntry, + TocEntryType, + XrefTarget, +) from .md import Converter, Renderer from .redirects import Redirects from .src_error import SrcError + class BaseConverter(Converter[md.TR], Generic[md.TR]): # per-converter configuration for ns:arg=value arguments to include blocks, following # the include type. 
html converters need something like this to support chunking, or @@ -32,8 +41,8 @@ class BaseConverter(Converter[md.TR], Generic[md.TR]): _current_type: list[TocEntryType] def convert(self, infile: Path, outfile: Path) -> None: - self._base_paths = [ infile ] - self._current_type = ['book'] + self._base_paths = [infile] + self._current_type = ["book"] try: tokens = self._parse(infile.read_text()) self._postprocess(infile, outfile, tokens) @@ -42,10 +51,14 @@ def convert(self, infile: Path, outfile: Path) -> None: except Exception as e: raise RuntimeError(f"failed to render manual {infile}") from e - def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> None: + def _postprocess( + self, infile: Path, outfile: Path, tokens: Sequence[Token] + ) -> None: pass - def _handle_headings(self, tokens: list[Token], *, src: str, on_heading: Callable[[Token,str],None]) -> None: + def _handle_headings( + self, tokens: list[Token], *, src: str, on_heading: Callable[[Token, str], None] + ) -> None: # Headings in a globally numbered order # h1 to h6 curr_heading_pos: list[int] = [] @@ -62,27 +75,26 @@ def _handle_headings(self, tokens: list[Token], *, src: str, on_heading: Callabl if idx >= len(curr_heading_pos): # extend the list if necessary - curr_heading_pos.extend([0 for _i in range(idx+1 - len(curr_heading_pos))]) + curr_heading_pos.extend( + [0 for _i in range(idx + 1 - len(curr_heading_pos))] + ) - curr_heading_pos = curr_heading_pos[:idx+1] + curr_heading_pos = curr_heading_pos[: idx + 1] curr_heading_pos[-1] += 1 - ident = ".".join(f"{a}" for a in curr_heading_pos) - on_heading(token,ident) - - + on_heading(token, ident) def _parse(self, src: str, *, auto_id_prefix: None | str = None) -> list[Token]: tokens = super()._parse(src) if auto_id_prefix: + def set_token_ident(token: Token, ident: str) -> None: if "id" not in token.attrs: token.attrs["id"] = f"{auto_id_prefix}-{ident}" self._handle_headings(tokens, src=src, on_heading=set_token_ident) - check_structure(src, self._current_type[-1], tokens) for token in tokens: if not is_include(token): @@ -90,14 +102,18 @@ def set_token_ident(token: Token, ident: str) -> None: directive = token.info[12:].split() if not directive: continue - args = { k: v for k, _sep, v in map(lambda s: s.partition('='), directive[1:]) } + args = { + k: v for k, _sep, v in map(lambda s: s.partition("="), directive[1:]) + } typ = directive[0] - if typ == 'options': - token.type = 'included_options' - self._process_include_args(src, token, args, self.INCLUDE_OPTIONS_ALLOWED_ARGS) + if typ == "options": + token.type = "included_options" + self._process_include_args( + src, token, args, self.INCLUDE_OPTIONS_ALLOWED_ARGS + ) self._parse_options(src, token, args) else: - fragment_type = typ.removesuffix('s') + fragment_type = typ.removesuffix("s") if fragment_type not in get_args(FragmentType): raise SrcError( src=src, @@ -105,27 +121,33 @@ def set_token_ident(token: Token, ident: str) -> None: token=token, ) self._current_type.append(cast(FragmentType, fragment_type)) - token.type = 'included_' + typ - self._process_include_args(src, token, args, self.INCLUDE_FRAGMENT_ALLOWED_ARGS) + token.type = "included_" + typ + self._process_include_args( + src, token, args, self.INCLUDE_FRAGMENT_ALLOWED_ARGS + ) self._parse_included_blocks(src, token, args) self._current_type.pop() return tokens - def _process_include_args(self, src: str, token: Token, args: dict[str, str], allowed: set[str]) -> None: + def _process_include_args( + self, src: str, token: Token, 
args: dict[str, str], allowed: set[str] + ) -> None: ns = self.INCLUDE_ARGS_NS + ":" - args = { k[len(ns):]: v for k, v in args.items() if k.startswith(ns) } + args = {k[len(ns) :]: v for k, v in args.items() if k.startswith(ns)} if unknown := set(args.keys()) - allowed: raise SrcError( src=src, description=f"unrecognized include argument(s): {unknown}", token=token, ) - token.meta['include-args'] = args + token.meta["include-args"] = args - def _parse_included_blocks(self, src: str, token: Token, block_args: dict[str, str]) -> None: + def _parse_included_blocks( + self, src: str, token: Token, block_args: dict[str, str] + ) -> None: assert token.map - included = token.meta['included'] = [] - for (lnum, line) in enumerate(token.content.splitlines(), token.map[0] + 1): + included = token.meta["included"] = [] + for lnum, line in enumerate(token.content.splitlines(), token.map[0] + 1): line = line.strip() path = self._base_paths[-1].parent / line if path in self._base_paths: @@ -136,7 +158,7 @@ def _parse_included_blocks(self, src: str, token: Token, block_args: dict[str, s ) try: self._base_paths.append(path) - with open(path, 'r') as f: + with open(path, "r") as f: prefix = None if "auto-id-prefix" in block_args: # include the current file number to prevent duplicate ids within include blocks @@ -152,11 +174,13 @@ def _parse_included_blocks(self, src: str, token: Token, block_args: dict[str, s token=lnum, ) from e - def _parse_options(self, src: str, token: Token, block_args: dict[str, str]) -> None: + def _parse_options( + self, src: str, token: Token, block_args: dict[str, str] + ) -> None: assert token.map items = {} - for (lnum, line) in enumerate(token.content.splitlines(), token.map[0] + 1): + for lnum, line in enumerate(token.content.splitlines(), token.map[0] + 1): if len(args := line.split(":", 1)) != 2: raise SrcError( src=src, @@ -179,9 +203,9 @@ def _parse_options(self, src: str, token: Token, block_args: dict[str, str]) -> items[k] = v try: - id_prefix = items.pop('id-prefix') - varlist_id = items.pop('list-id') - source = items.pop('source') + id_prefix = items.pop("id-prefix") + varlist_id = items.pop("list-id") + source = items.pop("source") except KeyError as e: raise SrcError( src=src, @@ -199,10 +223,10 @@ def _parse_options(self, src: str, token: Token, block_args: dict[str, str]) -> ) try: - with open(self._base_paths[-1].parent / source, 'r') as f: - token.meta['id-prefix'] = id_prefix - token.meta['list-id'] = varlist_id - token.meta['source'] = json.load(f) + with open(self._base_paths[-1].parent / source, "r") as f: + token.meta["id-prefix"] = id_prefix + token.meta["list-id"] = varlist_id + token.meta["source"] = json.load(f) except Exception as e: raise SrcError( src=src, @@ -210,6 +234,7 @@ def _parse_options(self, src: str, token: Token, block_args: dict[str, str]) -> token=token, ) from e + class RendererMixin(Renderer): _toplevel_tag: str _revision: str @@ -219,19 +244,19 @@ def __init__(self, toplevel_tag: str, revision: str, *args: Any, **kwargs: Any): self._toplevel_tag = toplevel_tag self._revision = revision self.rules |= { - 'included_sections': lambda *args: self._included_thing("section", *args), - 'included_chapters': lambda *args: self._included_thing("chapter", *args), - 'included_preface': lambda *args: self._included_thing("preface", *args), - 'included_parts': lambda *args: self._included_thing("part", *args), - 'included_appendix': lambda *args: self._included_thing("appendix", *args), - 'included_options': self.included_options, + 
"included_sections": lambda *args: self._included_thing("section", *args), + "included_chapters": lambda *args: self._included_thing("chapter", *args), + "included_preface": lambda *args: self._included_thing("preface", *args), + "included_parts": lambda *args: self._included_thing("part", *args), + "included_appendix": lambda *args: self._included_thing("appendix", *args), + "included_options": self.included_options, } def render(self, tokens: Sequence[Token]) -> str: # books get special handling because they have *two* title tags. doing this with # generic code is more complicated than it's worth. the checks above have verified # that both titles actually exist. - if self._toplevel_tag == 'book': + if self._toplevel_tag == "book": return self._render_book(tokens) return super().render(tokens) @@ -241,7 +266,9 @@ def _render_book(self, tokens: Sequence[Token]) -> str: raise NotImplementedError() @abstractmethod - def _included_thing(self, tag: str, token: Token, tokens: Sequence[Token], i: int) -> str: + def _included_thing( + self, tag: str, token: Token, tokens: Sequence[Token], i: int + ) -> str: raise NotImplementedError() @abstractmethod @@ -261,15 +288,24 @@ class HTMLParameters(NamedTuple): section_toc_depth: int media_dir: Path + class ManualHTMLRenderer(RendererMixin, HTMLRenderer): _base_path: Path _in_dir: Path _html_params: HTMLParameters _redirects: Redirects | None - def __init__(self, toplevel_tag: str, revision: str, html_params: HTMLParameters, - manpage_urls: Mapping[str, str], xref_targets: dict[str, XrefTarget], - redirects: Redirects | None, in_dir: Path, base_path: Path): + def __init__( + self, + toplevel_tag: str, + revision: str, + html_params: HTMLParameters, + manpage_urls: Mapping[str, str], + xref_targets: dict[str, XrefTarget], + redirects: Redirects | None, + in_dir: Path, + base_path: Path, + ): super().__init__(toplevel_tag, revision, manpage_urls, xref_targets) self._in_dir = in_dir self._base_path = base_path.absolute() @@ -290,37 +326,51 @@ def _pull_image(self, src: str) -> str: return f"./{self._html_params.media_dir}/{target_name}" def _push(self, tag: str, hlevel_offset: int) -> Any: - result = (self._toplevel_tag, self._headings, self._attrspans, self._hlevel_offset, self._in_dir) + result = ( + self._toplevel_tag, + self._headings, + self._attrspans, + self._hlevel_offset, + self._in_dir, + ) self._hlevel_offset += hlevel_offset self._toplevel_tag, self._headings, self._attrspans = tag, [], [] return result def _pop(self, state: Any) -> None: - (self._toplevel_tag, self._headings, self._attrspans, self._hlevel_offset, self._in_dir) = state + ( + self._toplevel_tag, + self._headings, + self._attrspans, + self._hlevel_offset, + self._in_dir, + ) = state def _render_book(self, tokens: Sequence[Token]) -> str: assert tokens[4].children - title_id = cast(str, tokens[0].attrs.get('id', "")) + title_id = cast(str, tokens[0].attrs.get("id", "")) title = self._xref_targets[title_id].title # subtitles don't have IDs, so we can't use xrefs to get them subtitle = self.renderInline(tokens[4].children) toc = TocEntry.of(tokens[0]) - return "\n".join([ - self._file_header(toc), - '
    ', - '
    ', - '
    ', - f'

    {title}

    ', - f'

    {subtitle}

    ', - '
    ', - "
    ", - '
    ', - self._build_toc(tokens, 0), - super(HTMLRenderer, self).render(tokens[6:]), - '
    ', - self._file_footer(toc), - ]) + return "\n".join( + [ + self._file_header(toc), + '
    ', + '
    ', + "
    ", + f'

    {title}

    ', + f'

    {subtitle}

    ', + "
    ", + "
    ", + "
    ", + self._build_toc(tokens, 0), + super(HTMLRenderer, self).render(tokens[6:]), + "
    ", + self._file_footer(toc), + ] + ) def _file_header(self, toc: TocEntry) -> str: prev_link, up_link, next_link = "", "", "" @@ -335,55 +385,69 @@ def _file_header(self, toc: TocEntry) -> str: f'' ) - if (part := toc.parent) and part.kind != 'book': + if (part := toc.parent) and part.kind != "book": assert part.target.title parent_title = part.target.title if toc.next: next_link = f'' next_a = f'Next' if toc.prev or toc.parent or toc.next: - nav_html = "\n".join([ - ' ', - ]) + nav_html = "\n".join( + [ + ' ", + ] + ) scripts = self._html_params.scripts if self._redirects: - redirects_name = f'{toc.target.path.split('.html')[0]}-redirects.js' - with open(self._base_path / redirects_name, 'w') as file: + redirects_name = f"{toc.target.path.split('.html')[0]}-redirects.js" + with open(self._base_path / redirects_name, "w") as file: file.write(self._redirects.get_redirect_script(toc.target.path)) - scripts.append(f'./{redirects_name}') - - return "\n".join([ - '', - '', - '', - ' ', - ' ', - f' {toc.target.title}', - "".join((f'' - for style in self._html_params.stylesheets)), - "".join((f'' - for script in scripts)), - f' ', - f' ' if home.target.href() else "", - f' {up_link}{prev_link}{next_link}', - ' ', - ' ', - nav_html, - ]) + scripts.append(f"./{redirects_name}") + + return "\n".join( + [ + '', + '', + '', + " ", + ' ', + f" {toc.target.title}", + "".join( + ( + f'' + for style in self._html_params.stylesheets + ) + ), + "".join( + ( + f'' + for script in scripts + ) + ), + f' ', + f' ' + if home.target.href() + else "", + f" {up_link}{prev_link}{next_link}", + " ", + " ", + nav_html, + ] + ) def _file_footer(self, toc: TocEntry) -> str: # prev, next = self._get_prev_and_next() @@ -404,55 +468,64 @@ def _file_footer(self, toc: TocEntry) -> str: assert toc.next.target.title next_text = toc.next.target.title if toc.prev or toc.parent or toc.next: - nav_html = "\n".join([ - ' ', - ]) - return "\n".join([ - nav_html, - ' ', - '', - ]) + nav_html = "\n".join( + [ + ' ", + ] + ) + return "\n".join( + [ + nav_html, + " ", + "", + ] + ) def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int) -> str: - if token.tag == 'h1': + if token.tag == "h1": return self._toplevel_tag return super()._heading_tag(token, tokens, i) + def _build_toc(self, tokens: Sequence[Token], i: int) -> str: toc = TocEntry.of(tokens[i]) - if toc.kind == 'section' and self._html_params.section_toc_depth < 1: + if toc.kind == "section" and self._html_params.section_toc_depth < 1: return "" + def walk_and_emit(toc: TocEntry, depth: int) -> list[str]: if depth <= 0: return [] result = [] for child in toc.children: result.append( - f'
    ' + f"
    " f' ' f' {child.target.toc_html}' - f' ' - f'
    ' + f" " + f"" ) # we want to look straight through parts because docbook-xsl did too, but it # also makes for more uesful top-level tocs. - next_level = walk_and_emit(child, depth - (0 if child.kind == 'part' else 1)) + next_level = walk_and_emit( + child, depth - (0 if child.kind == "part" else 1) + ) if next_level: - result.append(f'
    {"".join(next_level)}
    ') + result.append(f"
    {''.join(next_level)}
    ") return result + def build_list(kind: str, id: str, lst: Sequence[TocEntry]) -> str: if not lst: return "" @@ -462,21 +535,22 @@ def build_list(kind: str, id: str, lst: Sequence[TocEntry]) -> str: ] return ( f'
    ' - f'

    List of {kind}

    ' - f'
    {"".join(entries)}
    ' - '
    ' + f"

    List of {kind}

    " + f"
    {''.join(entries)}
    " + "" ) + # we don't want to generate the "Title of Contents" header for sections, # docbook didn't and it's only distracting clutter unless it's the main table. # we also want to generate tocs only for a top-level section (ie, one that is # not itself contained in another section) - print_title = toc.kind != 'section' - if toc.kind == 'section': - if toc.parent and toc.parent.kind == 'section': + print_title = toc.kind != "section" + if toc.kind == "section": + if toc.parent and toc.parent.kind == "section": toc_depth = 0 else: toc_depth = self._html_params.section_toc_depth - elif toc.starts_new_chunk and toc.kind != 'book': + elif toc.starts_new_chunk and toc.kind != "book": toc_depth = self._html_params.chunk_toc_depth else: toc_depth = self._html_params.toc_depth @@ -484,45 +558,48 @@ def build_list(kind: str, id: str, lst: Sequence[TocEntry]) -> str: return "" figures = build_list("Figures", "list-of-figures", toc.figures) examples = build_list("Examples", "list-of-examples", toc.examples) - return "".join([ - f'
    ', - '

    Table of Contents

    ' if print_title else "", - f'
    ' - f' {"".join(items)}' - f'
    ' - f'
    ' - f'{figures}' - f'{examples}' - ]) + return "".join( + [ + f'
    ', + "

    Table of Contents

    " if print_title else "", + f'
    {"".join(items)}
    {figures}{examples}', + ] + ) def _make_hN(self, level: int) -> tuple[str, str]: # for some reason chapters didn't increase the hN nesting count in docbook xslts. # originally this was duplicated here for consistency with docbook rendering, but # it could be reevaluated and changed now that docbook is gone. - if self._toplevel_tag == 'chapter': + if self._toplevel_tag == "chapter": level -= 1 # this style setting is also for docbook compatibility only and could well go away. style = "" - if level + self._hlevel_offset < 3 \ - and (self._toplevel_tag == 'section' or (self._toplevel_tag == 'chapter' and level > 0)): + if level + self._hlevel_offset < 3 and ( + self._toplevel_tag == "section" + or (self._toplevel_tag == "chapter" and level > 0) + ): style = "clear: both" tag, hstyle = super()._make_hN(max(1, level)) return tag, style - def _included_thing(self, tag: str, token: Token, tokens: Sequence[Token], i: int) -> str: + def _included_thing( + self, tag: str, token: Token, tokens: Sequence[Token], i: int + ) -> str: outer, inner = [], [] # since books have no non-include content the toplevel book wrapper will not count # towards nesting depth. other types will have at least a title+id heading which # *does* count towards the nesting depth. chapters give a -1 to included sections # mirroring the special handing in _make_hN. sigh. hoffset = ( - 0 if not self._headings - else self._headings[-1].level - 1 if self._toplevel_tag == 'chapter' + 0 + if not self._headings + else self._headings[-1].level - 1 + if self._toplevel_tag == "chapter" else self._headings[-1].level ) outer.append(self._maybe_close_partintro()) - into = token.meta['include-args'].get('into-file') - fragments = token.meta['included'] + into = token.meta["include-args"].get("into-file") + fragments = token.meta["included"] state = self._push(tag, hoffset) if into: toc = TocEntry.of(fragments[0][0][0]) @@ -544,18 +621,24 @@ def _included_thing(self, tag: str, token: Token, tokens: Sequence[Token], i: in return "".join(outer) def included_options(self, token: Token, tokens: Sequence[Token], i: int) -> str: - conv = options.HTMLConverter(self._manpage_urls, self._revision, - token.meta['list-id'], token.meta['id-prefix'], - self._xref_targets) - conv.add_options(token.meta['source']) + conv = options.HTMLConverter( + self._manpage_urls, + self._revision, + token.meta["list-id"], + token.meta["id-prefix"], + self._xref_targets, + ) + conv.add_options(token.meta["source"]) return conv.finalize() + def _to_base26(n: int) -> str: return (_to_base26(n // 26) if n > 26 else "") + chr(ord("A") + n % 26) + class HTMLConverter(BaseConverter[ManualHTMLRenderer]): INCLUDE_ARGS_NS = "html" - INCLUDE_FRAGMENT_ALLOWED_ARGS = { 'into-file' } + INCLUDE_FRAGMENT_ALLOWED_ARGS = {"into-file"} _revision: str _html_params: HTMLParameters @@ -569,27 +652,46 @@ def _next_appendix_id(self) -> str: self._appendix_count += 1 return _to_base26(self._appendix_count - 1) - def __init__(self, revision: str, html_params: HTMLParameters, manpage_urls: Mapping[str, str], redirects: Redirects | None = None): + def __init__( + self, + revision: str, + html_params: HTMLParameters, + manpage_urls: Mapping[str, str], + redirects: Redirects | None = None, + ): super().__init__() - self._revision, self._html_params, self._manpage_urls, self._redirects = revision, html_params, manpage_urls, redirects + self._revision, self._html_params, self._manpage_urls, self._redirects = ( + revision, + html_params, + manpage_urls, + redirects, + ) self._xref_targets = {} 
self._redirection_targets = set() # renderer not set on purpose since it has a dependency on the output path! def convert(self, infile: Path, outfile: Path) -> None: self._renderer = ManualHTMLRenderer( - 'book', self._revision, self._html_params, self._manpage_urls, self._xref_targets, - self._redirects, infile.parent, outfile.parent) + "book", + self._revision, + self._html_params, + self._manpage_urls, + self._xref_targets, + self._redirects, + infile.parent, + outfile.parent, + ) super().convert(infile, outfile) def _parse(self, src: str, *, auto_id_prefix: None | str = None) -> list[Token]: - tokens = super()._parse(src,auto_id_prefix=auto_id_prefix) + tokens = super()._parse(src, auto_id_prefix=auto_id_prefix) for token in tokens: - if not token.type.startswith('included_') \ - or not (into := token.meta['include-args'].get('into-file')): + if not token.type.startswith("included_") or not ( + into := token.meta["include-args"].get("into-file") + ): continue assert token.map - if len(token.meta['included']) == 0: + if len(token.meta["included"]) == 0: raise SrcError( src=src, description=f"redirection target {into!r} is empty!", @@ -597,13 +699,13 @@ def _parse(self, src: str, *, auto_id_prefix: None | str = None) -> list[Token]: ) # we use blender-style //path to denote paths relative to the origin file # (usually index.html). this makes everything a lot easier and clearer. - if not into.startswith("//") or '/' in into[2:]: + if not into.startswith("//") or "/" in into[2:]: raise SrcError( src=src, description=f"html:into-file must be a relative-to-origin //filename: {into}", token=token, ) - into = token.meta['include-args']['into-file'] = into[2:] + into = token.meta["include-args"]["into-file"] = into[2:] if into in self._redirection_targets: raise SrcError( src=src, @@ -613,105 +715,145 @@ def _parse(self, src: str, *, auto_id_prefix: None | str = None) -> list[Token]: self._redirection_targets.add(into) return tokens - def _number_block(self, block: str, prefix: str, tokens: Sequence[Token], start: int = 1) -> int: - title_open, title_close = f'{block}_title_open', f'{block}_title_close' - for (i, token) in enumerate(tokens): + def _number_block( + self, block: str, prefix: str, tokens: Sequence[Token], start: int = 1 + ) -> int: + title_open, title_close = f"{block}_title_open", f"{block}_title_close" + for i, token in enumerate(tokens): if token.type == title_open: title = tokens[i + 1] - assert title.type == 'inline' and title.children + assert title.type == "inline" and title.children # the prefix is split into two tokens because the xref title_html will want # only the first of the two, but both must be rendered into the example itself. - title.children = ( - [ - Token('text', '', 0, content=f'{prefix} {start}'), - Token('text', '', 0, content='. ') - ] + title.children - ) + title.children = [ + Token("text", "", 0, content=f"{prefix} {start}"), + Token("text", "", 0, content=". 
"), + ] + title.children start += 1 - elif token.type.startswith('included_') and token.type != 'included_options': - for sub, _path in token.meta['included']: + elif ( + token.type.startswith("included_") and token.type != "included_options" + ): + for sub, _path in token.meta["included"]: start = self._number_block(block, prefix, sub, start) return start # xref | (id, type, heading inlines, file, starts new file) - def _collect_ids(self, tokens: Sequence[Token], target_file: str, typ: str, file_changed: bool - ) -> list[XrefTarget | tuple[str, str, Token, str, bool]]: + def _collect_ids( + self, tokens: Sequence[Token], target_file: str, typ: str, file_changed: bool + ) -> list[XrefTarget | tuple[str, str, Token, str, bool]]: result: list[XrefTarget | tuple[str, str, Token, str, bool]] = [] # collect all IDs and their xref substitutions. headings are deferred until everything # has been parsed so we can resolve links in headings. if that's even used anywhere. - for (i, bt) in enumerate(tokens): - if bt.type == 'heading_open' and (id := cast(str, bt.attrs.get('id', ''))): - result.append((id, typ if bt.tag == 'h1' else 'section', tokens[i + 1], target_file, - i == 0 and file_changed)) - elif bt.type == 'included_options': - id_prefix = bt.meta['id-prefix'] - for opt in bt.meta['source'].keys(): + for i, bt in enumerate(tokens): + if bt.type == "heading_open" and (id := cast(str, bt.attrs.get("id", ""))): + result.append( + ( + id, + typ if bt.tag == "h1" else "section", + tokens[i + 1], + target_file, + i == 0 and file_changed, + ) + ) + elif bt.type == "included_options": + id_prefix = bt.meta["id-prefix"] + for opt in bt.meta["source"].keys(): id = make_xml_id(f"{id_prefix}{opt}") name = html.escape(opt) - result.append(XrefTarget(id, f'{name}', name, None, target_file)) - elif bt.type.startswith('included_'): - sub_file = bt.meta['include-args'].get('into-file', target_file) - subtyp = bt.type.removeprefix('included_').removesuffix('s') - for si, (sub, _path) in enumerate(bt.meta['included']): - result += self._collect_ids(sub, sub_file, subtyp, si == 0 and sub_file != target_file) - elif bt.type == 'example_open' and (id := cast(str, bt.attrs.get('id', ''))): - result.append((id, 'example', tokens[i + 2], target_file, False)) - elif bt.type == 'figure_open' and (id := cast(str, bt.attrs.get('id', ''))): - result.append((id, 'figure', tokens[i + 2], target_file, False)) - elif bt.type == 'footnote_open' and (id := cast(str, bt.attrs.get('id', ''))): + result.append( + XrefTarget( + id, + f'{name}', + name, + None, + target_file, + ) + ) + elif bt.type.startswith("included_"): + sub_file = bt.meta["include-args"].get("into-file", target_file) + subtyp = bt.type.removeprefix("included_").removesuffix("s") + for si, (sub, _path) in enumerate(bt.meta["included"]): + result += self._collect_ids( + sub, sub_file, subtyp, si == 0 and sub_file != target_file + ) + elif bt.type == "example_open" and ( + id := cast(str, bt.attrs.get("id", "")) + ): + result.append((id, "example", tokens[i + 2], target_file, False)) + elif bt.type == "figure_open" and (id := cast(str, bt.attrs.get("id", ""))): + result.append((id, "figure", tokens[i + 2], target_file, False)) + elif bt.type == "footnote_open" and ( + id := cast(str, bt.attrs.get("id", "")) + ): result.append(XrefTarget(id, "???", None, None, target_file)) - elif bt.type == 'footnote_ref' and (id := cast(str, bt.attrs.get('id', ''))): + elif bt.type == "footnote_ref" and ( + id := cast(str, bt.attrs.get("id", "")) + ): result.append(XrefTarget(id, 
"???", None, None, target_file)) - elif bt.type == 'inline': + elif bt.type == "inline": assert bt.children is not None result += self._collect_ids(bt.children, target_file, typ, False) - elif id := cast(str, bt.attrs.get('id', '')): + elif id := cast(str, bt.attrs.get("id", "")): # anchors and examples have no titles we could use, but we'll have to put # *something* here to communicate that there's no title. result.append(XrefTarget(id, "???", None, None, target_file)) return result - def _render_xref(self, id: str, typ: str, inlines: Token, path: str, drop_fragment: bool) -> XrefTarget: + def _render_xref( + self, id: str, typ: str, inlines: Token, path: str, drop_fragment: bool + ) -> XrefTarget: assert inlines.children title_html = self._renderer.renderInline(inlines.children) - if typ == 'appendix': + if typ == "appendix": # NOTE the docbook compat is strong here n = self._next_appendix_id() - prefix = f"Appendix\u00A0{n}.\u00A0" + prefix = f"Appendix\u00a0{n}.\u00a0" # HACK for docbook compat: prefix the title inlines with appendix id if # necessary. the alternative is to mess with titlepage rendering in headings, # which seems just a lot worse than this - prefix_tokens = [Token(type='text', tag='', nesting=0, content=prefix)] + prefix_tokens = [Token(type="text", tag="", nesting=0, content=prefix)] inlines.children = prefix_tokens + list(inlines.children) title = prefix + title_html toc_html = f"{n}. {title_html}" title_html = f"Appendix {n}" - elif typ in ['example', 'figure']: + elif typ in ["example", "figure"]: # skip the prepended `{Example,Figure} N. ` from numbering - toc_html, title = self._renderer.renderInline(inlines.children[2:]), title_html + toc_html, title = ( + self._renderer.renderInline(inlines.children[2:]), + title_html, + ) # xref title wants only the prepended text, sans the trailing colon and space title_html = self._renderer.renderInline(inlines.children[0:1]) else: toc_html, title = title_html, title_html title_html = ( f"{title_html}" - if typ == 'chapter' - else title_html if typ in [ 'book', 'part' ] - else f'the section called “{title_html}”' + if typ == "chapter" + else title_html + if typ in ["book", "part"] + else f"the section called “{title_html}”" ) - return XrefTarget(id, title_html, toc_html, re.sub('<.*?>', '', title), path, drop_fragment) + return XrefTarget( + id, title_html, toc_html, re.sub("<.*?>", "", title), path, drop_fragment + ) - def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> None: - self._number_block('example', "Example", tokens) - self._number_block('figure', "Figure", tokens) - xref_queue = self._collect_ids(tokens, outfile.name, 'book', True) + def _postprocess( + self, infile: Path, outfile: Path, tokens: Sequence[Token] + ) -> None: + self._number_block("example", "Example", tokens) + self._number_block("figure", "Figure", tokens) + xref_queue = self._collect_ids(tokens, outfile.name, "book", True) failed = False deferred = [] while xref_queue: for item in xref_queue: try: - target = item if isinstance(item, XrefTarget) else self._render_xref(*item) + target = ( + item + if isinstance(item, XrefTarget) + else self._render_xref(*item) + ) except UnresolvedXrefError: if failed: raise @@ -722,7 +864,7 @@ def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> raise RuntimeError(f"found duplicate id #{target.id}") self._xref_targets[target.id] = target if len(deferred) == len(xref_queue): - failed = True # do another round and report the first error + failed = True # do 
another round and report the first error xref_queue = deferred paths_seen = set() @@ -730,7 +872,7 @@ def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> paths_seen.add(t.path) if len(paths_seen) == 1: - for (k, t) in self._xref_targets.items(): + for k, t in self._xref_targets.items(): self._xref_targets[k] = XrefTarget( t.id, t.title_html, @@ -738,14 +880,14 @@ def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> t.title, t.path, t.drop_fragment, - drop_target=True + drop_target=True, ) TocEntry.collect_and_link(self._xref_targets, tokens) if self._redirects: self._redirects.validate(self._xref_targets) server_redirects = self._redirects.get_server_redirects() - with open(outfile.parent / '_redirects', 'w') as server_redirects_file: + with open(outfile.parent / "_redirects", "w") as server_redirects_file: formatted_server_redirects = [] for from_path, to_path in server_redirects.items(): formatted_server_redirects.append(f"{from_path} {to_path} 301") @@ -753,21 +895,25 @@ def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> def _build_cli_html(p: argparse.ArgumentParser) -> None: - p.add_argument('--manpage-urls', required=True) - p.add_argument('--revision', required=True) - p.add_argument('--generator', default='nixos-render-docs') - p.add_argument('--stylesheet', default=[], action='append') - p.add_argument('--script', default=[], action='append') - p.add_argument('--toc-depth', default=1, type=int) - p.add_argument('--chunk-toc-depth', default=1, type=int) - p.add_argument('--section-toc-depth', default=0, type=int) - p.add_argument('--media-dir', default="media", type=Path) - p.add_argument('--redirects', type=Path) - p.add_argument('infile', type=Path) - p.add_argument('outfile', type=Path) + p.add_argument("--manpage-urls", required=True) + p.add_argument("--revision", required=True) + p.add_argument("--generator", default="nixos-render-docs") + p.add_argument("--stylesheet", default=[], action="append") + p.add_argument("--script", default=[], action="append") + p.add_argument("--toc-depth", default=1, type=int) + p.add_argument("--chunk-toc-depth", default=1, type=int) + p.add_argument("--section-toc-depth", default=0, type=int) + p.add_argument("--media-dir", default="media", type=Path) + p.add_argument("--redirects", type=Path) + p.add_argument("infile", type=Path) + p.add_argument("outfile", type=Path) + def _run_cli_html(args: argparse.Namespace) -> None: - with open(args.manpage_urls) as manpage_urls, open(Path(__file__).parent / "redirects.js") as redirects_script: + with ( + open(args.manpage_urls) as manpage_urls, + open(Path(__file__).parent / "redirects.js") as redirects_script, + ): redirects = None if args.redirects: with open(args.redirects) as raw_redirects: @@ -775,17 +921,28 @@ def _run_cli_html(args: argparse.Namespace) -> None: md = HTMLConverter( args.revision, - HTMLParameters(args.generator, args.stylesheet, args.script, args.toc_depth, - args.chunk_toc_depth, args.section_toc_depth, args.media_dir), - json.load(manpage_urls), redirects) + HTMLParameters( + args.generator, + args.stylesheet, + args.script, + args.toc_depth, + args.chunk_toc_depth, + args.section_toc_depth, + args.media_dir, + ), + json.load(manpage_urls), + redirects, + ) md.convert(args.infile, args.outfile) + def build_cli(p: argparse.ArgumentParser) -> None: - formats = p.add_subparsers(dest='format', required=True) - _build_cli_html(formats.add_parser('html')) + formats = p.add_subparsers(dest="format", 
required=True) + _build_cli_html(formats.add_parser("html")) + def run_cli(args: argparse.Namespace) -> None: - if args.format == 'html': + if args.format == "html": _run_cli_html(args) else: - raise RuntimeError('format not hooked up', args) + raise RuntimeError("format not hooked up", args) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual_structure.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual_structure.py index 64effecb88f51..802833a51f19d 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual_structure.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/manual_structure.py @@ -12,14 +12,18 @@ from .src_error import SrcError # FragmentType is used to restrict structural include blocks. -FragmentType = Literal['preface', 'part', 'chapter', 'section', 'appendix'] +FragmentType = Literal["preface", "part", "chapter", "section", "appendix"] # in the TOC all fragments are allowed, plus the all-encompassing book. -TocEntryType = Literal['book', 'preface', 'part', 'chapter', 'section', 'appendix', 'example', 'figure'] +TocEntryType = Literal[ + "book", "preface", "part", "chapter", "section", "appendix", "example", "figure" +] + def is_include(token: Token) -> bool: return token.type == "fence" and token.info.startswith("{=include=} ") + # toplevel file must contain only the title headings and includes, anything else # would cause strange rendering. def _check_book_structure(src: str, tokens: Sequence[Token]) -> None: @@ -31,28 +35,32 @@ def _check_book_structure(src: str, tokens: Sequence[Token]) -> None: token=token, ) + # much like books, parts may not contain headings other than their title heading. # this is a limitation of the current renderers and TOC generators that do not handle # this case well even though it is supported in docbook (and probably supportable # anywhere else). -def _check_part_structure(src: str,tokens: Sequence[Token]) -> None: +def _check_part_structure(src: str, tokens: Sequence[Token]) -> None: _check_fragment_structure(src, tokens) for token in tokens[3:]: - if token.type == 'heading_open': + if token.type == "heading_open": raise SrcError( src=src, description="unexpected heading", token=token, ) + # two include blocks must either be adjacent or separated by a heading, otherwise # we cannot generate a correct TOC (since there'd be nothing to link to between # the two includes). 
def _check_fragment_structure(src: str, tokens: Sequence[Token]) -> None: for i, token in enumerate(tokens): - if is_include(token) \ - and i + 1 < len(tokens) \ - and not (is_include(tokens[i + 1]) or tokens[i + 1].type == 'heading_open'): + if ( + is_include(token) + and i + 1 < len(tokens) + and not (is_include(tokens[i + 1]) or tokens[i + 1].type == "heading_open") + ): assert token.map raise SrcError( src=src, @@ -60,21 +68,22 @@ def _check_fragment_structure(src: str, tokens: Sequence[Token]) -> None: token=token, ) + def check_structure(src: str, kind: TocEntryType, tokens: Sequence[Token]) -> None: - wanted = { 'h1': 'title' } - wanted |= { 'h2': 'subtitle' } if kind == 'book' else {} - for (i, (tag, role)) in enumerate(wanted.items()): + wanted = {"h1": "title"} + wanted |= {"h2": "subtitle"} if kind == "book" else {} + for i, (tag, role) in enumerate(wanted.items()): if len(tokens) < 3 * (i + 1): raise RuntimeError(f"missing {role} ({tag}) heading") token = tokens[3 * i] - if token.type != 'heading_open' or token.tag != tag: + if token.type != "heading_open" or token.tag != tag: raise SrcError( src=src, description=f"expected {role} ({tag}) heading", token=token, ) - for t in tokens[3 * len(wanted):]: - if t.type != 'heading_open' or not (role := wanted.get(t.tag, '')): + for t in tokens[3 * len(wanted) :]: + if t.type != "heading_open" or not (role := wanted.get(t.tag, "")): continue raise SrcError( src=src, @@ -86,36 +95,37 @@ def check_structure(src: str, kind: TocEntryType, tokens: Sequence[Token]) -> No last_heading_level = 0 for token in tokens: - if token.type != 'heading_open': + if token.type != "heading_open": continue # book subtitle headings do not need an id, only book title headings do. # every other headings needs one too. we need this to build a TOC and to # provide stable links if the manual changes shape. 
- if 'id' not in token.attrs and (kind != 'book' or token.tag != 'h2'): + if "id" not in token.attrs and (kind != "book" or token.tag != "h2"): raise SrcError( src=src, description=f"heading does not have an id", token=token, ) - level = int(token.tag[1:]) # because tag = h1..h6 + level = int(token.tag[1:]) # because tag = h1..h6 if level > last_heading_level + 1: raise SrcError( src=src, description=f"heading skips one or more heading levels, " - "which is currently not allowed", + "which is currently not allowed", token=token, ) last_heading_level = level - if kind == 'book': + if kind == "book": _check_book_structure(src, tokens) - elif kind == 'part': + elif kind == "part": _check_part_structure(src, tokens) else: _check_fragment_structure(src, tokens) + @dc.dataclass(frozen=True) class XrefTarget: id: str @@ -137,6 +147,7 @@ def href(self) -> str: path = "" if self.drop_target else html.escape(self.path, True) return path if self.drop_fragment else f"{path}#{html.escape(self.id, True)}" + @dc.dataclass class TocEntry(Freezeable): kind: TocEntryType @@ -155,18 +166,24 @@ def root(self) -> TocEntry: @classmethod def of(cls, token: Token) -> TocEntry: - entry = token.meta.get('TocEntry') + entry = token.meta.get("TocEntry") if not isinstance(entry, TocEntry): - raise RuntimeError('requested toc entry, none found', token) + raise RuntimeError("requested toc entry, none found", token) return entry @classmethod - def collect_and_link(cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token]) -> TocEntry: - entries, examples, figures = cls._collect_entries(xrefs, tokens, 'book') + def collect_and_link( + cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token] + ) -> TocEntry: + entries, examples, figures = cls._collect_entries(xrefs, tokens, "book") - def flatten_with_parent(this: TocEntry, parent: TocEntry | None) -> Iterable[TocEntry]: + def flatten_with_parent( + this: TocEntry, parent: TocEntry | None + ) -> Iterable[TocEntry]: this.parent = parent - return itertools.chain([this], *[ flatten_with_parent(c, this) for c in this.children ]) + return itertools.chain( + [this], *[flatten_with_parent(c, this) for c in this.children] + ) flat = list(flatten_with_parent(entries, None)) prev = flat[0] @@ -188,8 +205,9 @@ def flatten_with_parent(this: TocEntry, parent: TocEntry | None) -> Iterable[Toc return entries @classmethod - def _collect_entries(cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token], - kind: TocEntryType) -> tuple[TocEntry, list[TocEntry], list[TocEntry]]: + def _collect_entries( + cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token], kind: TocEntryType + ) -> tuple[TocEntry, list[TocEntry], list[TocEntry]]: # we assume that check_structure has been run recursively over the entire input. # list contains (tag, entry) pairs that will collapse to a single entry for # the full sequence. 
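The "(tag, entry) pairs that will collapse to a single entry" bookkeeping mentioned here is the stack discipline used in the loop that follows: each new heading first pops every open entry at the same or a deeper level into its parent, and a final pass collapses whatever is left. A standalone sketch under invented input (the dicts stand in for TocEntry; comparing tags as strings works because h1..h6 sort in document order):

headings = ["h1", "h2", "h3", "h2"]  # title, section, subsection, second section
entries: list[tuple[str, dict]] = []

for tag in headings:
    while len(entries) > 1 and entries[-1][0] >= tag:
        entries[-2][1]["children"].append(entries.pop()[1])
    entries.append((tag, {"tag": tag, "children": []}))

while len(entries) > 1:  # final collapse, as at the end of _collect_entries
    entries[-2][1]["children"].append(entries.pop()[1])

print(entries[0][1])
# {'tag': 'h1', 'children': [
#     {'tag': 'h2', 'children': [{'tag': 'h3', 'children': []}]},
#     {'tag': 'h2', 'children': []}]}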
@@ -197,40 +215,57 @@ def _collect_entries(cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token], examples: list[TocEntry] = [] figures: list[TocEntry] = [] for token in tokens: - if token.type.startswith('included_') and (included := token.meta.get('included')): - fragment_type_str = token.type[9:].removesuffix('s') + if token.type.startswith("included_") and ( + included := token.meta.get("included") + ): + fragment_type_str = token.type[9:].removesuffix("s") assert fragment_type_str in get_args(TocEntryType) fragment_type = cast(TocEntryType, fragment_type_str) for fragment, _path in included: - subentries, subexamples, subfigures = cls._collect_entries(xrefs, fragment, fragment_type) + subentries, subexamples, subfigures = cls._collect_entries( + xrefs, fragment, fragment_type + ) entries[-1][1].children.append(subentries) examples += subexamples figures += subfigures - elif token.type == 'heading_open' and (id := cast(str, token.attrs.get('id', ''))): + elif token.type == "heading_open" and ( + id := cast(str, token.attrs.get("id", "")) + ): while len(entries) > 1 and entries[-1][0] >= token.tag: entries[-2][1].children.append(entries.pop()[1]) - entries.append((token.tag, - TocEntry(kind if token.tag == 'h1' else 'section', xrefs[id]))) - token.meta['TocEntry'] = entries[-1][1] - elif token.type == 'example_open' and (id := cast(str, token.attrs.get('id', ''))): - examples.append(TocEntry('example', xrefs[id])) - elif token.type == 'figure_open' and (id := cast(str, token.attrs.get('id', ''))): - figures.append(TocEntry('figure', xrefs[id])) + entries.append( + ( + token.tag, + TocEntry(kind if token.tag == "h1" else "section", xrefs[id]), + ) + ) + token.meta["TocEntry"] = entries[-1][1] + elif token.type == "example_open" and ( + id := cast(str, token.attrs.get("id", "")) + ): + examples.append(TocEntry("example", xrefs[id])) + elif token.type == "figure_open" and ( + id := cast(str, token.attrs.get("id", "")) + ): + figures.append(TocEntry("figure", xrefs[id])) while len(entries) > 1: entries[-2][1].children.append(entries.pop()[1]) return (entries[0][1], examples, figures) + _xml_id_translate_table = { - ord('*'): ord('_'), - ord('<'): ord('_'), - ord(' '): ord('_'), - ord('>'): ord('_'), - ord('['): ord('_'), - ord(']'): ord('_'), - ord(':'): ord('_'), - ord('"'): ord('_'), + ord("*"): ord("_"), + ord("<"): ord("_"), + ord(" "): ord("_"), + ord(">"): ord("_"), + ord("["): ord("_"), + ord("]"): ord("_"), + ord(":"): ord("_"), + ord('"'): ord("_"), } + + # this function is needed to generate option id attributes in the same format as # the docbook toolchain did to not break existing links. 
we don't actually use # xml any more, that's just the legacy we're dealing with and part of our structure diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/md.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/md.py index 237c554075bd3..f6d46338036d3 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/md.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/md.py @@ -1,6 +1,17 @@ from abc import ABC from collections.abc import Mapping, MutableMapping, Sequence -from typing import Any, Callable, cast, Generic, get_args, Iterable, Literal, NoReturn, Optional, TypeVar +from typing import ( + Any, + Callable, + cast, + Generic, + get_args, + Iterable, + Literal, + NoReturn, + Optional, + TypeVar, +) import dataclasses import re @@ -11,41 +22,46 @@ import markdown_it from markdown_it.token import Token from markdown_it.utils import OptionsDict -from mdit_py_plugins.container import container_plugin # type: ignore[attr-defined] -from mdit_py_plugins.deflist import deflist_plugin # type: ignore[attr-defined] -from mdit_py_plugins.footnote import footnote_plugin # type: ignore[attr-defined] -from mdit_py_plugins.myst_role import myst_role_plugin # type: ignore[attr-defined] +from mdit_py_plugins.container import container_plugin # type: ignore[attr-defined] +from mdit_py_plugins.deflist import deflist_plugin # type: ignore[attr-defined] +from mdit_py_plugins.footnote import footnote_plugin # type: ignore[attr-defined] +from mdit_py_plugins.myst_role import myst_role_plugin # type: ignore[attr-defined] _md_escape_table = { - ord('*'): '\\*', - ord('<'): '\\<', - ord('['): '\\[', - ord('`'): '\\`', - ord('.'): '\\.', - ord('#'): '\\#', - ord('&'): '\\&', - ord('\\'): '\\\\', + ord("*"): "\\*", + ord("<"): "\\<", + ord("["): "\\[", + ord("`"): "\\`", + ord("."): "\\.", + ord("#"): "\\#", + ord("&"): "\\&", + ord("\\"): "\\\\", } + + def md_escape(s: str) -> str: return s.translate(_md_escape_table) + def md_make_code(code: str, info: str = "", multiline: Optional[bool] = None) -> str: # for multi-line code blocks we only have to count ` runs at the beginning # of a line, but this is much easier. - multiline = multiline or info != "" or '\n' in code + multiline = multiline or info != "" or "\n" in code longest, current = (0, 0) for c in code: - current = current + 1 if c == '`' else 0 + current = current + 1 if c == "`" else 0 longest = max(current, longest) # inline literals need a space to separate ticks from content, code blocks # need newlines. inline literals need one extra tick, code blocks need three. 
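A couple of hand-worked examples of what `md_make_code` above produces, assuming the package is importable; the backtick counting exists so that code which itself contains backticks still ends up inside a longer, unambiguous run of ticks:

from nixos_render_docs.md import md_make_code  # the module being reformatted here

# the longest backtick run inside is 1, so the literal gets 2 ticks and space separators
assert md_make_code("echo `hi`") == "`` echo `hi` ``"

# an info string or an embedded newline switches to a fenced block: 3 ticks, newline separators
assert md_make_code("print(1)\nprint(2)", info="python") == "```python\nprint(1)\nprint(2)\n```"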
- ticks, sep = ('`' * (longest + (3 if multiline else 1)), '\n' if multiline else ' ') + ticks, sep = ("`" * (longest + (3 if multiline else 1)), "\n" if multiline else " ") return f"{ticks}{info}{sep}{code}{sep}{ticks}" -AttrBlockKind = Literal['admonition', 'example', 'figure'] + +AttrBlockKind = Literal["admonition", "example", "figure"] AdmonitionKind = Literal["note", "caution", "tip", "important", "warning"] + class Renderer: _admonitions: dict[AdmonitionKind, tuple[RenderFn, RenderFn]] _admonition_stack: list[AdmonitionKind] @@ -53,33 +69,33 @@ class Renderer: def __init__(self, manpage_urls: Mapping[str, str]): self._manpage_urls = manpage_urls self.rules = { - 'text': self.text, - 'paragraph_open': self.paragraph_open, - 'paragraph_close': self.paragraph_close, - 'hardbreak': self.hardbreak, - 'softbreak': self.softbreak, - 'code_inline': self.code_inline, - 'code_block': self.code_block, - 'link_open': self.link_open, - 'link_close': self.link_close, - 'list_item_open': self.list_item_open, - 'list_item_close': self.list_item_close, - 'bullet_list_open': self.bullet_list_open, - 'bullet_list_close': self.bullet_list_close, - 'em_open': self.em_open, - 'em_close': self.em_close, - 'strong_open': self.strong_open, - 'strong_close': self.strong_close, - 'fence': self.fence, - 'blockquote_open': self.blockquote_open, - 'blockquote_close': self.blockquote_close, - 'dl_open': self.dl_open, - 'dl_close': self.dl_close, - 'dt_open': self.dt_open, - 'dt_close': self.dt_close, - 'dd_open': self.dd_open, - 'dd_close': self.dd_close, - 'myst_role': self.myst_role, + "text": self.text, + "paragraph_open": self.paragraph_open, + "paragraph_close": self.paragraph_close, + "hardbreak": self.hardbreak, + "softbreak": self.softbreak, + "code_inline": self.code_inline, + "code_block": self.code_block, + "link_open": self.link_open, + "link_close": self.link_close, + "list_item_open": self.list_item_open, + "list_item_close": self.list_item_close, + "bullet_list_open": self.bullet_list_open, + "bullet_list_close": self.bullet_list_close, + "em_open": self.em_open, + "em_close": self.em_close, + "strong_open": self.strong_open, + "strong_close": self.strong_close, + "fence": self.fence, + "blockquote_open": self.blockquote_open, + "blockquote_close": self.blockquote_close, + "dl_open": self.dl_open, + "dl_close": self.dl_close, + "dt_open": self.dt_open, + "dt_close": self.dt_close, + "dd_open": self.dd_open, + "dd_close": self.dd_close, + "myst_role": self.myst_role, "admonition_open": self.admonition_open, "admonition_close": self.admonition_close, "attr_span_begin": self.attr_span_begin, @@ -119,7 +135,7 @@ def __init__(self, manpage_urls: Mapping[str, str]): self._admonitions = { "note": (self.note_open, self.note_close), - "caution": (self.caution_open,self.caution_close), + "caution": (self.caution_open, self.caution_close), "tip": (self.tip_open, self.tip_close), "important": (self.important_open, self.important_close), "warning": (self.warning_open, self.warning_close), @@ -128,13 +144,15 @@ def __init__(self, manpage_urls: Mapping[str, str]): def _join_block(self, ls: Iterable[str]) -> str: return "".join(ls) + def _join_inline(self, ls: Iterable[str]) -> str: return "".join(ls) def admonition_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - tag = token.meta['kind'] + tag = token.meta["kind"] self._admonition_stack.append(tag) return self._admonitions[tag][0](token, tokens, i) + def admonition_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: return 
self._admonitions[self._admonition_stack.pop()][1](token, tokens, i) @@ -147,184 +165,264 @@ def do_one(i: int, token: Token) -> str: return self.rules[token.type](tokens[i], tokens, i) else: raise NotImplementedError("md token not supported yet", token) + return self._join_block(map(lambda arg: do_one(*arg), enumerate(tokens))) + def renderInline(self, tokens: Sequence[Token]) -> str: def do_one(i: int, token: Token) -> str: if token.type in self.rules: return self.rules[token.type](tokens[i], tokens, i) else: raise NotImplementedError("md token not supported yet", token) + return self._join_inline(map(lambda arg: do_one(*arg), enumerate(tokens))) def text(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def 
caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str: # NixOS-specific roles are documented at /doc/README.md (with reverse reference) raise RuntimeError("md token not supported", token) + def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def example_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def example_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def example_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def example_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def image(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def figure_open(self, token: Token, tokens: 
Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def figure_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def figure_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def figure_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def table_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def table_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def thead_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def thead_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def tr_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def tr_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def th_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def th_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def tbody_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def tbody_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def td_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def td_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def footnote_ref(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def footnote_block_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) - def footnote_block_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: + + def footnote_block_close( + self, token: Token, tokens: Sequence[Token], i: int + ) -> str: raise RuntimeError("md token not supported", token) + def footnote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def footnote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def footnote_anchor(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported", token) + def _is_escaped(src: str, pos: int) -> bool: found = 0 - while pos >= 0 and src[pos] == '\\': + while pos >= 0 and src[pos] == "\\": found += 1 pos -= 1 return found % 2 == 1 + # the contents won't be split apart in the regex because spacing rules get messy here _ATTR_SPAN_PATTERN = re.compile(r"\{([^}]*)\}") # this one is for blocks with attrs. we want to use it with fullmatch() to deconstruct an info. 
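To make the two attribute patterns concrete: the span pattern may be followed by more inline text, while the block pattern (defined next) is applied with fullmatch() to an entire info string. A few hand-written probes, assuming nothing beyond the regexes themselves:

import re

_ATTR_SPAN_PATTERN = re.compile(r"\{([^}]*)\}")         # copied from the patch
_ATTR_BLOCK_PATTERN = re.compile(r"\s*\{([^}]*)\}\s*")  # copied from the patch

assert _ATTR_SPAN_PATTERN.match("{#target .foo} and more inline text")
assert _ATTR_BLOCK_PATTERN.fullmatch("  {#ex-name .example}  ")
assert _ATTR_BLOCK_PATTERN.fullmatch("{.example} trailing junk") is None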
_ATTR_BLOCK_PATTERN = re.compile(r"\s*\{([^}]*)\}\s*") + def _parse_attrs(s: str) -> Optional[tuple[Optional[str], list[str]]]: (id, classes) = (None, []) for part in s.split(): - if part.startswith('#'): + if part.startswith("#"): if id is not None: - return None # just bail on multiple ids instead of trying to recover + return None # just bail on multiple ids instead of trying to recover id = part[1:] - elif part.startswith('.'): + elif part.startswith("."): classes.append(part[1:]) else: - return None # no support for key=value attrs like in pandoc + return None # no support for key=value attrs like in pandoc return (id, classes) -def _parse_blockattrs(info: str) -> Optional[tuple[AttrBlockKind, Optional[str], list[str]]]: + +def _parse_blockattrs( + info: str, +) -> Optional[tuple[AttrBlockKind, Optional[str], list[str]]]: if (m := _ATTR_BLOCK_PATTERN.fullmatch(info)) is None: return None if (parsed_attrs := _parse_attrs(m[1])) is None: @@ -336,16 +434,17 @@ def _parse_blockattrs(info: str) -> Optional[tuple[AttrBlockKind, Optional[str], # don't want to support ids for admonitions just yet if id is not None: return None - return ('admonition', id, classes) - if classes == ['example']: - return ('example', id, classes) - elif classes == ['figure']: - return ('figure', id, classes) + return ("admonition", id, classes) + if classes == ["example"]: + return ("example", id, classes) + elif classes == ["figure"]: + return ("figure", id, classes) return None + def _attr_span_plugin(md: markdown_it.MarkdownIt) -> None: def attr_span(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool: - if state.src[state.pos] != '[': + if state.src[state.pos] != "[": return False if _is_escaped(state.src, state.pos - 1): return False @@ -358,7 +457,7 @@ def attr_span(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool return False # match id and classes in any combination - match = _ATTR_SPAN_PATTERN.match(state.src[label_end + 1 : ]) + match = _ATTR_SPAN_PATTERN.match(state.src[label_end + 1 :]) if not match: return False @@ -369,9 +468,9 @@ def attr_span(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool token = state.push("attr_span_begin", "span", 1) if id: - token.attrs['id'] = id + token.attrs["id"] = id if classes: - token.attrs['class'] = " ".join(classes) + token.attrs["class"] = " ".join(classes) state.pos = label_begin state.posMax = label_end @@ -385,14 +484,17 @@ def attr_span(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool md.inline.ruler.before("link", "attr_span", attr_span) + def _inline_comment_plugin(md: markdown_it.MarkdownIt) -> None: - def inline_comment(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool: - if state.src[state.pos : state.pos + 4] != '': # --> + if state.src[i : i + 3] == "-->": # --> state.pos = i + 3 return True @@ -400,13 +502,18 @@ def inline_comment(state: markdown_it.rules_inline.StateInline, silent: bool) -> md.inline.ruler.after("autolink", "inline_comment", inline_comment) + def _block_comment_plugin(md: markdown_it.MarkdownIt) -> None: - def block_comment(state: markdown_it.rules_block.StateBlock, startLine: int, endLine: int, - silent: bool) -> bool: + def block_comment( + state: markdown_it.rules_block.StateBlock, + startLine: int, + endLine: int, + silent: bool, + ) -> bool: pos = state.bMarks[startLine] + state.tShift[startLine] posMax = state.eMarks[startLine] - if state.src[pos : pos + 4] != '': + if state.src[posMax - 3 : posMax] == "-->": state.line = nextLine + 1 return 
True @@ -424,32 +531,36 @@ def block_comment(state: markdown_it.rules_block.StateBlock, startLine: int, end md.block.ruler.after("code", "block_comment", block_comment) + _HEADER_ID_RE = re.compile(r"\s*\{\s*\#([\w.-]+)\s*\}\s*$") + def _heading_ids(md: markdown_it.MarkdownIt) -> None: def heading_ids(state: markdown_it.rules_core.StateCore) -> None: tokens = state.tokens # this is purposely simple and doesn't support classes or other kinds of attributes. - for (i, token) in enumerate(tokens): - if token.type == 'heading_open': + for i, token in enumerate(tokens): + if token.type == "heading_open": children = tokens[i + 1].children assert children is not None - if len(children) == 0 or children[-1].type != 'text': + if len(children) == 0 or children[-1].type != "text": continue if m := _HEADER_ID_RE.search(children[-1].content): - tokens[i].attrs['id'] = m[1] - children[-1].content = children[-1].content[:-len(m[0])].rstrip() + tokens[i].attrs["id"] = m[1] + children[-1].content = children[-1].content[: -len(m[0])].rstrip() md.core.ruler.before("replacements", "heading_ids", heading_ids) + def _footnote_ids(md: markdown_it.MarkdownIt) -> None: """generate ids for footnotes, their refs, and their backlinks. the ids we - generate here are derived from the footnote label, making numeric footnote - labels invalid. + generate here are derived from the footnote label, making numeric footnote + labels invalid. """ + def generate_ids(src: str, tokens: Sequence[Token]) -> None: for token in tokens: - if token.type == 'footnote_open': + if token.type == "footnote_open": if token.meta["label"][:1].isdigit(): assert token.map raise SrcError( @@ -457,13 +568,17 @@ def generate_ids(src: str, tokens: Sequence[Token]) -> None: description="invalid footnote label", token=token, ) - token.attrs['id'] = token.meta["label"] - elif token.type == 'footnote_anchor': - token.meta['target'] = f'{token.meta["label"]}.__back.{token.meta["subId"]}' - elif token.type == 'footnote_ref': - token.attrs['id'] = f'{token.meta["label"]}.__back.{token.meta["subId"]}' - token.meta['target'] = token.meta["label"] - elif token.type == 'inline': + token.attrs["id"] = token.meta["label"] + elif token.type == "footnote_anchor": + token.meta["target"] = ( + f"{token.meta['label']}.__back.{token.meta['subId']}" + ) + elif token.type == "footnote_ref": + token.attrs["id"] = ( + f"{token.meta['label']}.__back.{token.meta['subId']}" + ) + token.meta["target"] = token.meta["label"] + elif token.type == "inline": assert token.children is not None generate_ids(src, token.children) @@ -472,6 +587,7 @@ def footnote_ids(state: markdown_it.rules_core.StateCore) -> None: md.core.ruler.after("footnote_tail", "footnote_ids", footnote_ids) + def _compact_list_attr(md: markdown_it.MarkdownIt) -> None: @dataclasses.dataclass class Entry: @@ -485,20 +601,21 @@ def compact_list_attr(state: markdown_it.rules_core.StateCore) -> None: # signify this with a special css class on list elements instead. 
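The `_heading_ids` rule above recognizes an explicit `{#some-id}` suffix on a heading, moves it into the heading token's attributes, and strips it from the visible text. A quick, hand-checked probe of the regex with a made-up heading:

import re

_HEADER_ID_RE = re.compile(r"\s*\{\s*\#([\w.-]+)\s*\}\s*$")  # copied from the patch

text = "Installing NixOS {#sec-installation}"
m = _HEADER_ID_RE.search(text)
assert m is not None and m[1] == "sec-installation"
assert text[: -len(m[0])].rstrip() == "Installing NixOS"  # what remains as the heading text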
stack = [] for token in state.tokens: - if token.type in [ 'bullet_list_open', 'ordered_list_open' ]: - stack.append(Entry(token, cast(int, token.attrs.get('start', 1)))) - elif token.type in [ 'bullet_list_close', 'ordered_list_close' ]: + if token.type in ["bullet_list_open", "ordered_list_open"]: + stack.append(Entry(token, cast(int, token.attrs.get("start", 1)))) + elif token.type in ["bullet_list_close", "ordered_list_close"]: lst = stack.pop() - lst.head.meta['compact'] = lst.compact - if token.type == 'ordered_list_close': - lst.head.meta['end'] = lst.end - 1 - elif len(stack) > 0 and token.type == 'paragraph_open' and not token.hidden: + lst.head.meta["compact"] = lst.compact + if token.type == "ordered_list_close": + lst.head.meta["end"] = lst.end - 1 + elif len(stack) > 0 and token.type == "paragraph_open" and not token.hidden: stack[-1].compact = False - elif token.type == 'list_item_open': + elif token.type == "list_item_open": stack[-1].end += 1 md.core.ruler.push("compact_list_attr", compact_list_attr) + def _block_attr(md: markdown_it.MarkdownIt) -> None: def assert_never(value: NoReturn) -> NoReturn: assert False @@ -506,47 +623,49 @@ def assert_never(value: NoReturn) -> NoReturn: def block_attr(state: markdown_it.rules_core.StateCore) -> None: stack = [] for token in state.tokens: - if token.type == 'container_blockattr_open': + if token.type == "container_blockattr_open": if (parsed_attrs := _parse_blockattrs(token.info)) is None: # if we get here we've missed a possible case in the plugin validate function raise RuntimeError("this should be unreachable") kind, id, classes = parsed_attrs - if kind == 'admonition': - token.type = 'admonition_open' - token.meta['kind'] = classes[0] - stack.append('admonition_close') - elif kind == 'example': - token.type = 'example_open' + if kind == "admonition": + token.type = "admonition_open" + token.meta["kind"] = classes[0] + stack.append("admonition_close") + elif kind == "example": + token.type = "example_open" if id is not None: - token.attrs['id'] = id - stack.append('example_close') - elif kind == 'figure': - token.type = 'figure_open' + token.attrs["id"] = id + stack.append("example_close") + elif kind == "figure": + token.type = "figure_open" if id is not None: - token.attrs['id'] = id - stack.append('figure_close') + token.attrs["id"] = id + stack.append("figure_close") else: assert_never(kind) - elif token.type == 'container_blockattr_close': + elif token.type == "container_blockattr_close": token.type = stack.pop() md.core.ruler.push("block_attr", block_attr) + def _block_titles(block: str) -> Callable[[markdown_it.MarkdownIt], None]: - open, close = f'{block}_open', f'{block}_close' - title_open, title_close = f'{block}_title_open', f'{block}_title_close' + open, close = f"{block}_open", f"{block}_close" + title_open, title_close = f"{block}_title_open", f"{block}_title_close" """ find title headings of blocks and stick them into meta for renderers, then remove them from the token stream. also checks whether any block contains a non-title heading since those would make toc generation extremely complicated. 
""" + def block_titles(state: markdown_it.rules_core.StateCore) -> None: in_example = [None] for i, token in enumerate(state.tokens): if token.type == open: - if state.tokens[i + 1].type == 'heading_open': - assert state.tokens[i + 3].type == 'heading_close' + if state.tokens[i + 1].type == "heading_open": + assert state.tokens[i + 3].type == "heading_close" state.tokens[i + 1].type = title_open state.tokens[i + 3].type = title_close else: @@ -558,7 +677,7 @@ def block_titles(state: markdown_it.rules_core.StateCore) -> None: in_example.append(token) elif token.type == close: in_example.pop() - elif token.type == 'heading_open' and in_example[-1]: + elif token.type == "heading_open" and in_example[-1]: assert token.map started_at = in_example[-1] @@ -566,7 +685,7 @@ def block_titles(state: markdown_it.rules_core.StateCore) -> None: raise SrcError( description=f"unexpected non-title heading in `{block_display}`; are you missing a `:::`?\n" - f"Note: blocks like `{block_display}` are only allowed to contain a single heading in order to simplify TOC generation.", + f"Note: blocks like `{block_display}` are only allowed to contain a single heading in order to simplify TOC generation.", src=state.src, tokens={ f"`{block_display}` block": started_at, @@ -579,7 +698,9 @@ def do_add(md: markdown_it.MarkdownIt) -> None: return do_add -TR = TypeVar('TR', bound='Renderer') + +TR = TypeVar("TR", bound="Renderer") + class Converter(ABC, Generic[TR]): # we explicitly disable markdown-it rendering support and use our own entirely. @@ -592,9 +713,15 @@ class ForbiddenRenderer(markdown_it.renderer.RendererProtocol): def __init__(self, parser: Optional[markdown_it.MarkdownIt]): pass - def render(self, tokens: Sequence[Token], options: OptionsDict, - env: MutableMapping[str, Any]) -> str: - raise NotImplementedError("do not use Converter._md.renderer. 'tis a silly place") + def render( + self, + tokens: Sequence[Token], + options: OptionsDict, + env: MutableMapping[str, Any], + ) -> str: + raise NotImplementedError( + "do not use Converter._md.renderer. 
'tis a silly place" + ) _renderer: TR @@ -602,13 +729,13 @@ def __init__(self) -> None: self._md = markdown_it.MarkdownIt( "commonmark", { - 'maxNesting': 100, # default is 20 - 'html': False, # not useful since we target many formats - 'typographer': True, # required for smartquotes + "maxNesting": 100, # default is 20 + "html": False, # not useful since we target many formats + "typographer": True, # required for smartquotes }, - renderer_cls=self.ForbiddenRenderer + renderer_cls=self.ForbiddenRenderer, ) - self._md.enable('table') + self._md.enable("table") self._md.use( container_plugin, name="blockattr", diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/options.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/options.py index 75fbeadce1d09..13e4e0f95aed7 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/options.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/options.py @@ -23,14 +23,16 @@ from .md import Converter, md_escape, md_make_code from .types import OptionLoc, Option, RenderedOption, AnchorStyle + def option_is(option: Option, key: str, typ: str) -> Optional[dict[str, str]]: if key not in option: return None if type(option[key]) != dict: return None - if option[key].get('_type') != typ: # type: ignore[union-attr] + if option[key].get("_type") != typ: # type: ignore[union-attr] return None - return option[key] # type: ignore[return-value] + return option[key] # type: ignore[return-value] + class BaseConverter(Converter[md.TR], Generic[md.TR]): __option_block_separator__: str @@ -44,9 +46,20 @@ def __init__(self, revision: str): def _sorted_options(self) -> list[tuple[str, RenderedOption]]: keys = list(self._options.keys()) - keys.sort(key=lambda opt: [ (0 if p.startswith("enable") else 1 if p.startswith("package") else 2, p) - for p in self._options[opt].loc ]) - return [ (k, self._options[k]) for k in keys ] + keys.sort( + key=lambda opt: [ + ( + 0 + if p.startswith("enable") + else 1 + if p.startswith("package") + else 2, + p, + ) + for p in self._options[opt].loc + ] + ) + return [(k, self._options[k]) for k in keys] def _format_decl_def_loc(self, loc: OptionLoc) -> tuple[Optional[str], str]: # locations can be either plain strings (specific to nixpkgs), or attrsets @@ -55,34 +68,39 @@ def _format_decl_def_loc(self, loc: OptionLoc) -> tuple[Optional[str], str]: # Hyperlink the filename either to the NixOS github # repository (if it’s a module and we have a revision number), # or to the local filesystem. - if not loc.startswith('/'): - if self._revision == 'local': + if not loc.startswith("/"): + if self._revision == "local": href = f"https://github.com/NixOS/nixpkgs/blob/master/{loc}" else: - href = f"https://github.com/NixOS/nixpkgs/blob/{self._revision}/{loc}" + href = ( + f"https://github.com/NixOS/nixpkgs/blob/{self._revision}/{loc}" + ) else: href = f"file://{loc}" # Print the filename and make it user-friendly by replacing the # /nix/store/ prefix by the default location of nixos # sources. 
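As a small check on `option_is` from the top of this file: it only accepts values that are dicts tagged with the requested `_type`, which is how the options JSON marks `literalExpression`/`literalMD` defaults and examples. The entry below is made up, its shape inferred from how `_render_code` consumes it:

from nixos_render_docs.options import option_is  # assumes the package is importable

option = {
    "loc": ["services", "foo", "package"],
    "default": {"_type": "literalExpression", "text": "pkgs.foo"},
}

assert option_is(option, "default", "literalExpression") == option["default"]
assert option_is(option, "default", "literalMD") is None          # wrong tag
assert option_is(option, "example", "literalExpression") is None  # key absent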
- if not loc.startswith('/'): + if not loc.startswith("/"): name = f"" - elif 'nixops' in loc and '/nix/' in loc: - name = f"" + elif "nixops" in loc and "/nix/" in loc: + name = f"" else: name = loc return (href, name) else: - return (loc['url'] if 'url' in loc else None, loc['name']) + return (loc["url"] if "url" in loc else None, loc["name"]) @abstractmethod - def _decl_def_header(self, header: str) -> list[str]: raise NotImplementedError() + def _decl_def_header(self, header: str) -> list[str]: + raise NotImplementedError() @abstractmethod - def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: raise NotImplementedError() + def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: + raise NotImplementedError() @abstractmethod - def _decl_def_footer(self) -> list[str]: raise NotImplementedError() + def _decl_def_footer(self) -> list[str]: + raise NotImplementedError() def _render_decl_def(self, header: str, locs: list[OptionLoc]) -> list[str]: result = [] @@ -94,11 +112,11 @@ def _render_decl_def(self, header: str, locs: list[OptionLoc]) -> list[str]: return result def _render_code(self, option: Option, key: str) -> list[str]: - if lit := option_is(option, key, 'literalMD'): - return [ self._render(f"*{key.capitalize()}:*\n{lit['text']}") ] - elif lit := option_is(option, key, 'literalExpression'): - code = md_make_code(lit['text']) - return [ self._render(f"*{key.capitalize()}:*\n{code}") ] + if lit := option_is(option, key, "literalMD"): + return [self._render(f"*{key.capitalize()}:*\n{lit['text']}")] + elif lit := option_is(option, key, "literalExpression"): + code = md_make_code(lit["text"]) + return [self._render(f"*{key.capitalize()}:*\n{code}")] elif key in option: raise Exception(f"{key} has unrecognized type", option[key]) else: @@ -106,57 +124,61 @@ def _render_code(self, option: Option, key: str) -> list[str]: def _render_description(self, desc: str | dict[str, str]) -> list[str]: if isinstance(desc, str): - return [ self._render(desc) ] if desc else [] - elif isinstance(desc, dict) and desc.get('_type') == 'mdDoc': - return [ self._render(desc['text']) ] if desc['text'] else [] + return [self._render(desc)] if desc else [] + elif isinstance(desc, dict) and desc.get("_type") == "mdDoc": + return [self._render(desc["text"])] if desc["text"] else [] else: raise Exception("description has unrecognized type", desc) @abstractmethod - def _related_packages_header(self) -> list[str]: raise NotImplementedError() + def _related_packages_header(self) -> list[str]: + raise NotImplementedError() def _convert_one(self, option: dict[str, Any]) -> list[str]: blocks: list[list[str]] = [] - if desc := option.get('description'): + if desc := option.get("description"): blocks.append(self._render_description(desc)) - if typ := option.get('type'): - ro = " *(read only)*" if option.get('readOnly', False) else "" - blocks.append([ self._render(f"*Type:*\n{md_escape(typ)}{ro}") ]) + if typ := option.get("type"): + ro = " *(read only)*" if option.get("readOnly", False) else "" + blocks.append([self._render(f"*Type:*\n{md_escape(typ)}{ro}")]) - if option.get('default'): - blocks.append(self._render_code(option, 'default')) - if option.get('example'): - blocks.append(self._render_code(option, 'example')) + if option.get("default"): + blocks.append(self._render_code(option, "default")) + if option.get("example"): + blocks.append(self._render_code(option, "example")) - if related := option.get('relatedPackages'): + if related := option.get("relatedPackages"): 
blocks.append(self._related_packages_header()) blocks[-1].append(self._render(related)) - if decl := option.get('declarations'): + if decl := option.get("declarations"): blocks.append(self._render_decl_def("Declared by", decl)) - if defs := option.get('definitions'): + if defs := option.get("definitions"): blocks.append(self._render_decl_def("Defined by", defs)) - for part in [ p for p in blocks[0:-1] if p ]: + for part in [p for p in blocks[0:-1] if p]: part.append(self.__option_block_separator__) - return [ l for part in blocks for l in part ] + return [l for part in blocks for l in part] # this could return a TState parameter, but that does not allow dependent types and # will cause headaches when using BaseConverter as a type bound anywhere. Any is the # next best thing we can use, and since this is internal it will be mostly safe. @abstractmethod - def _parallel_render_prepare(self) -> Any: raise NotImplementedError() + def _parallel_render_prepare(self) -> Any: + raise NotImplementedError() + # this should return python 3.11's Self instead to ensure that a prepare+finish # round-trip ends up with an object of the same type. for now we'll use BaseConverter # since it's good enough so far. @classmethod @abstractmethod - def _parallel_render_init_worker(cls, a: Any) -> BaseConverter[md.TR]: raise NotImplementedError() + def _parallel_render_init_worker(cls, a: Any) -> BaseConverter[md.TR]: + raise NotImplementedError() def _render_option(self, name: str, option: dict[str, Any]) -> RenderedOption: try: - return RenderedOption(option['loc'], self._convert_one(option)) + return RenderedOption(option["loc"], self._convert_one(option)) except Exception as e: raise Exception(f"Failed to render option {name}") from e @@ -165,39 +187,54 @@ def _parallel_render_step(cls, s: BaseConverter[md.TR], a: Any) -> RenderedOptio return s._render_option(*a) def add_options(self, options: dict[str, Any]) -> None: - mapped = parallel.map(self._parallel_render_step, options.items(), 100, - self._parallel_render_init_worker, self._parallel_render_prepare()) - for (name, option) in zip(options.keys(), mapped): + mapped = parallel.map( + self._parallel_render_step, + options.items(), + 100, + self._parallel_render_init_worker, + self._parallel_render_prepare(), + ) + for name, option in zip(options.keys(), mapped): self._options[name] = option @abstractmethod - def finalize(self) -> str: raise NotImplementedError() + def finalize(self) -> str: + raise NotImplementedError() + class OptionDocsRestrictions: def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported in options doc", token) + def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported in options doc", token) + def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported in options doc", token) + def example_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise RuntimeError("md token not supported in options doc", token) + class OptionsManpageRenderer(OptionDocsRestrictions, ManpageRenderer): pass + class ManpageConverter(BaseConverter[OptionsManpageRenderer]): __option_block_separator__ = ".sp" _options_by_id: dict[str, str] _links_in_last_description: Optional[list[str]] = None - def __init__(self, revision: str, - header: list[str] | None, - footer: list[str] | None, - *, - # only for parallel rendering - _options_by_id: Optional[dict[str, str]] 
= None): + def __init__( + self, + revision: str, + header: list[str] | None, + footer: list[str] | None, + *, + # only for parallel rendering + _options_by_id: Optional[dict[str, str]] = None, + ): super().__init__(revision) self._options_by_id = _options_by_id or {} self._renderer = OptionsManpageRenderer({}, self._options_by_id) @@ -209,8 +246,9 @@ def _parallel_render_prepare(self) -> Any: self._revision, self._header, self._footer, - { '_options_by_id': self._options_by_id }, + {"_options_by_id": self._options_by_id}, ) + @classmethod def _parallel_render_init_worker(cls, a: Any) -> ManpageConverter: return cls(a[0], a[1], a[2], **a[3]) @@ -222,8 +260,8 @@ def _render_option(self, name: str, option: dict[str, Any]) -> RenderedOption: return result._replace(links=links) def add_options(self, options: dict[str, Any]) -> None: - for (k, v) in options.items(): - self._options_by_id[f'#{make_xml_id(f"opt-{k}")}'] = k + for k, v in options.items(): + self._options_by_id[f"#{make_xml_id(f'opt-{k}')}"] = k return super().add_options(options) def _render_code(self, option: dict[str, Any], key: str) -> list[str]: @@ -235,21 +273,17 @@ def _render_code(self, option: dict[str, Any], key: str) -> list[str]: def _related_packages_header(self) -> list[str]: return [ - '\\fIRelated packages:\\fP', - '.sp', + "\\fIRelated packages:\\fP", + ".sp", ] def _decl_def_header(self, header: str) -> list[str]: return [ - f'\\fI{man_escape(header)}:\\fP', + f"\\fI{man_escape(header)}:\\fP", ] def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: - return [ - '.RS 4', - f'\\fB{man_escape(name)}\\fP', - '.RE' - ] + return [".RS 4", f"\\fB{man_escape(name)}\\fP", ".RE"] def _decl_def_footer(self) -> list[str]: return [] @@ -262,26 +296,32 @@ def finalize(self) -> str: else: result += [ r'''.TH "CONFIGURATION\&.NIX" "5" "01/01/1980" "NixOS" "NixOS Reference Pages"''', - r'''.\" disable hyphenation''', - r'''.nh''', - r'''.\" disable justification (adjust text to left margin only)''', - r'''.ad l''', - r'''.\" enable line breaks after slashes''', - r'''.cflags 4 /''', + r""".\" disable hyphenation""", + r""".nh""", + r""".\" disable justification (adjust text to left margin only)""", + r""".ad l""", + r""".\" enable line breaks after slashes""", + r""".cflags 4 /""", r'''.SH "NAME"''', - self._render('{file}`configuration.nix` - NixOS system configuration specification'), + self._render( + "{file}`configuration.nix` - NixOS system configuration specification" + ), r'''.SH "DESCRIPTION"''', - r'''.PP''', - self._render('The file {file}`/etc/nixos/configuration.nix` contains the ' - 'declarative specification of your NixOS system configuration. ' - 'The command {command}`nixos-rebuild` takes this file and ' - 'realises the system configuration specified therein.'), + r""".PP""", + self._render( + "The file {file}`/etc/nixos/configuration.nix` contains the " + "declarative specification of your NixOS system configuration. " + "The command {command}`nixos-rebuild` takes this file and " + "realises the system configuration specified therein." + ), r'''.SH "OPTIONS"''', - r'''.PP''', - self._render('You can use the following options in {file}`configuration.nix`.'), + r""".PP""", + self._render( + "You can use the following options in {file}`configuration.nix`." 
+ ), ] - for (name, opt) in self._sorted_options(): + for name, opt in self._sorted_options(): result += [ ".PP", f"\\fB{man_escape(name)}\\fR", @@ -293,10 +333,10 @@ def finalize(self) -> str: md_links = "" for i in range(0, len(links)): md_links += "\n" if i > 0 else "" - if links[i].startswith('#opt-'): - md_links += f"{i+1}. see the {{option}}`{self._options_by_id[links[i]]}` option" + if links[i].startswith("#opt-"): + md_links += f"{i + 1}. see the {{option}}`{self._options_by_id[links[i]]}` option" else: - md_links += f"{i+1}. " + md_escape(links[i]) + md_links += f"{i + 1}. " + md_escape(links[i]) result.append(self._render(md_links)) result.append(".RE") @@ -306,22 +346,29 @@ def finalize(self) -> str: else: result += [ r'''.SH "AUTHORS"''', - r'''.PP''', - r'''Eelco Dolstra and the Nixpkgs/NixOS contributors''', + r""".PP""", + r"""Eelco Dolstra and the Nixpkgs/NixOS contributors""", ] return "\n".join(result) + class OptionsCommonMarkRenderer(OptionDocsRestrictions, CommonMarkRenderer): pass + class CommonMarkConverter(BaseConverter[OptionsCommonMarkRenderer]): __option_block_separator__ = "" _anchor_style: AnchorStyle _anchor_prefix: str - - def __init__(self, manpage_urls: Mapping[str, str], revision: str, anchor_style: AnchorStyle = AnchorStyle.NONE, anchor_prefix: str = ""): + def __init__( + self, + manpage_urls: Mapping[str, str], + revision: str, + anchor_style: AnchorStyle = AnchorStyle.NONE, + anchor_prefix: str = "", + ): super().__init__(revision) self._renderer = OptionsCommonMarkRenderer(manpage_urls) self._anchor_style = anchor_style @@ -329,20 +376,21 @@ def __init__(self, manpage_urls: Mapping[str, str], revision: str, anchor_style: def _parallel_render_prepare(self) -> Any: return (self._renderer._manpage_urls, self._revision) + @classmethod def _parallel_render_init_worker(cls, a: Any) -> CommonMarkConverter: return cls(*a) def _related_packages_header(self) -> list[str]: - return [ "*Related packages:*" ] + return ["*Related packages:*"] def _decl_def_header(self, header: str) -> list[str]: - return [ f"*{header}:*" ] + return [f"*{header}:*"] def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: if href is not None: - return [ f" - [{md_escape(name)}]({href})" ] - return [ f" - {md_escape(name)}" ] + return [f" - [{md_escape(name)}]({href})"] + return [f" - {md_escape(name)}"] def _decl_def_footer(self) -> list[str]: return [] @@ -359,7 +407,7 @@ def _make_anchor_suffix(self, loc: list[str]) -> str: def finalize(self) -> str: result = [] - for (name, opt) in self._sorted_options(): + for name, opt in self._sorted_options(): anchor_suffix = self._make_anchor_suffix(opt.loc) result.append(f"## {md_escape(name)}{anchor_suffix}\n") result += opt.lines @@ -367,9 +415,11 @@ def finalize(self) -> str: return "\n".join(result) + class OptionsAsciiDocRenderer(OptionDocsRestrictions, AsciiDocRenderer): pass + class AsciiDocConverter(BaseConverter[OptionsAsciiDocRenderer]): __option_block_separator__ = "" @@ -379,20 +429,21 @@ def __init__(self, manpage_urls: Mapping[str, str], revision: str): def _parallel_render_prepare(self) -> Any: return (self._renderer._manpage_urls, self._revision) + @classmethod def _parallel_render_init_worker(cls, a: Any) -> AsciiDocConverter: return cls(*a) def _related_packages_header(self) -> list[str]: - return [ "__Related packages:__" ] + return ["__Related packages:__"] def _decl_def_header(self, header: str) -> list[str]: - return [ f"__{header}:__\n" ] + return [f"__{header}:__\n"] def _decl_def_entry(self, href: 
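The anchors registered in `ManpageConverter.add_options` above are produced by `make_xml_id`; judging from the `_xml_id_translate_table` shown earlier in this diff, that function appears to simply replace characters DocBook did not allow in ids with underscores. A hedged sketch of that mapping (the helper name and body here are assumptions, only the table contents are from the patch):

_xml_id_translate_table = {ord(c): ord("_") for c in '*< >[]:"'}

def make_xml_id_sketch(s: str) -> str:
    return s.translate(_xml_id_translate_table)

# angle brackets become underscores, so the anchor stays a single valid id
assert make_xml_id_sketch("opt-environment.etc.<name>.enable") == "opt-environment.etc._name_.enable"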
Optional[str], name: str) -> list[str]: if href is not None: - return [ f"* link:{quote(href, safe='/:')}[{asciidoc_escape(name)}]" ] - return [ f"* {asciidoc_escape(name)}" ] + return [f"* link:{quote(href, safe='/:')}[{asciidoc_escape(name)}]"] + return [f"* {asciidoc_escape(name)}"] def _decl_def_footer(self) -> list[str]: return [] @@ -400,30 +451,40 @@ def _decl_def_footer(self) -> list[str]: def finalize(self) -> str: result = [] - for (name, opt) in self._sorted_options(): + for name, opt in self._sorted_options(): result.append(f"== {asciidoc_escape(name)}\n") result += opt.lines result.append("\n\n") return "\n".join(result) + class OptionsHTMLRenderer(OptionDocsRestrictions, HTMLRenderer): # TODO docbook compat. must be removed together with the matching docbook handlers. def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - token.meta['compact'] = False + token.meta["compact"] = False return super().ordered_list_open(token, tokens, i) + def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str: - token.meta['compact'] = False + token.meta["compact"] = False return super().bullet_list_open(token, tokens, i) + def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str: info = f" {html.escape(token.info, True)}" if token.info != "" else "" return f'
    {html.escape(token.content)}
    ' + class HTMLConverter(BaseConverter[OptionsHTMLRenderer]): __option_block_separator__ = "" - def __init__(self, manpage_urls: Mapping[str, str], revision: str, - varlist_id: str, id_prefix: str, xref_targets: Mapping[str, XrefTarget]): + def __init__( + self, + manpage_urls: Mapping[str, str], + revision: str, + varlist_id: str, + id_prefix: str, + xref_targets: Mapping[str, XrefTarget], + ): super().__init__(revision) self._xref_targets = xref_targets self._varlist_id = varlist_id @@ -431,8 +492,14 @@ def __init__(self, manpage_urls: Mapping[str, str], revision: str, self._renderer = OptionsHTMLRenderer(manpage_urls, self._xref_targets) def _parallel_render_prepare(self) -> Any: - return (self._renderer._manpage_urls, self._revision, - self._varlist_id, self._id_prefix, self._xref_targets) + return ( + self._renderer._manpage_urls, + self._revision, + self._varlist_id, + self._id_prefix, + self._xref_targets, + ) + @classmethod def _parallel_render_init_worker(cls, a: Any) -> HTMLConverter: return cls(*a) @@ -445,7 +512,7 @@ def _related_packages_header(self) -> list[str]: def _decl_def_header(self, header: str) -> list[str]: return [ f'

    {header}:

    ', - '' + '
    ', ] def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: @@ -454,13 +521,13 @@ def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: return [ "" + f"{html.escape(name)}", + "", + "", ] def _decl_def_footer(self) -> list[str]: - return [ "
    ", f'', - f'{html.escape(name)}', - '', - "
    " ] + return [""] def finalize(self) -> str: result = [] @@ -471,73 +538,78 @@ def finalize(self) -> str: '
    ', ] - for (name, opt) in self._sorted_options(): + for name, opt in self._sorted_options(): id = make_xml_id(self._id_prefix + name) target = self._xref_targets[id] result += [ - '
    ', + "
    ", ' ', # docbook compat, these could be one tag f' ' # no spaces here (and string merging) for docbook output compat f'{html.escape(name)}', - ' ', - ' ', - '
    ', - '
    ', + " ", + " ", + "", + "
    ", ] result += opt.lines result += [ "
    ", ] - result += [ - "
    ", - "" - ] + result += [" ", ""] return "\n".join(result) + def _build_cli_manpage(p: argparse.ArgumentParser) -> None: - p.add_argument('--revision', required=True) + p.add_argument("--revision", required=True) p.add_argument("--header", type=Path) p.add_argument("--footer", type=Path) p.add_argument("infile") p.add_argument("outfile") -def parse_anchor_style(value: str|AnchorStyle) -> AnchorStyle: + +def parse_anchor_style(value: str | AnchorStyle) -> AnchorStyle: if isinstance(value, AnchorStyle): # Used by `argparse.add_argument`'s `default` return value try: return AnchorStyle(value.lower()) except ValueError: - raise argparse.ArgumentTypeError(f"Invalid value {value}\nExpected one of {', '.join(style.value for style in AnchorStyle)}") + raise argparse.ArgumentTypeError( + f"Invalid value {value}\nExpected one of {', '.join(style.value for style in AnchorStyle)}" + ) + def _build_cli_commonmark(p: argparse.ArgumentParser) -> None: - p.add_argument('--manpage-urls', required=True) - p.add_argument('--revision', required=True) + p.add_argument("--manpage-urls", required=True) + p.add_argument("--revision", required=True) p.add_argument( - '--anchor-style', + "--anchor-style", required=False, default=AnchorStyle.NONE.value, - choices = [style.value for style in AnchorStyle], - help = "(default: %(default)s) Anchor style to use for links to options. \nOnly none is standard CommonMark." + choices=[style.value for style in AnchorStyle], + help="(default: %(default)s) Anchor style to use for links to options. \nOnly none is standard CommonMark.", ) - p.add_argument('--anchor-prefix', + p.add_argument( + "--anchor-prefix", required=False, default="", - help="(default: no prefix) String to prepend to anchor ids. Not used when anchor style is none." + help="(default: no prefix) String to prepend to anchor ids. 
Not used when anchor style is none.", ) p.add_argument("infile") p.add_argument("outfile") + def _build_cli_asciidoc(p: argparse.ArgumentParser) -> None: - p.add_argument('--manpage-urls', required=True) - p.add_argument('--revision', required=True) + p.add_argument("--manpage-urls", required=True) + p.add_argument("--revision", required=True) p.add_argument("infile") p.add_argument("outfile") + def _run_cli_manpage(args: argparse.Namespace) -> None: header = None footer = None @@ -551,49 +623,55 @@ def _run_cli_manpage(args: argparse.Namespace) -> None: footer = f.read().splitlines() md = ManpageConverter( - revision = args.revision, - header = header, - footer = footer, + revision=args.revision, + header=header, + footer=footer, ) - with open(args.infile, 'r') as f: + with open(args.infile, "r") as f: md.add_options(json.load(f)) - with open(args.outfile, 'w') as f: + with open(args.outfile, "w") as f: f.write(md.finalize()) + def _run_cli_commonmark(args: argparse.Namespace) -> None: - with open(args.manpage_urls, 'r') as manpage_urls: - md = CommonMarkConverter(json.load(manpage_urls), - revision = args.revision, - anchor_style = parse_anchor_style(args.anchor_style), - anchor_prefix = args.anchor_prefix) + with open(args.manpage_urls, "r") as manpage_urls: + md = CommonMarkConverter( + json.load(manpage_urls), + revision=args.revision, + anchor_style=parse_anchor_style(args.anchor_style), + anchor_prefix=args.anchor_prefix, + ) - with open(args.infile, 'r') as f: + with open(args.infile, "r") as f: md.add_options(json.load(f)) - with open(args.outfile, 'w') as f: + with open(args.outfile, "w") as f: f.write(md.finalize()) + def _run_cli_asciidoc(args: argparse.Namespace) -> None: - with open(args.manpage_urls, 'r') as manpage_urls: - md = AsciiDocConverter(json.load(manpage_urls), revision = args.revision) + with open(args.manpage_urls, "r") as manpage_urls: + md = AsciiDocConverter(json.load(manpage_urls), revision=args.revision) - with open(args.infile, 'r') as f: + with open(args.infile, "r") as f: md.add_options(json.load(f)) - with open(args.outfile, 'w') as f: + with open(args.outfile, "w") as f: f.write(md.finalize()) + def build_cli(p: argparse.ArgumentParser) -> None: - formats = p.add_subparsers(dest='format', required=True) - _build_cli_manpage(formats.add_parser('manpage')) - _build_cli_commonmark(formats.add_parser('commonmark')) - _build_cli_asciidoc(formats.add_parser('asciidoc')) + formats = p.add_subparsers(dest="format", required=True) + _build_cli_manpage(formats.add_parser("manpage")) + _build_cli_commonmark(formats.add_parser("commonmark")) + _build_cli_asciidoc(formats.add_parser("asciidoc")) + def run_cli(args: argparse.Namespace) -> None: - if args.format == 'manpage': + if args.format == "manpage": _run_cli_manpage(args) - elif args.format == 'commonmark': + elif args.format == "commonmark": _run_cli_commonmark(args) - elif args.format == 'asciidoc': + elif args.format == "asciidoc": _run_cli_asciidoc(args) else: - raise RuntimeError('format not hooked up', args) + raise RuntimeError("format not hooked up", args) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/parallel.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/parallel.py index ad58bf0264067..ee4ba3c095449 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/parallel.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/parallel.py @@ -6,10 +6,10 @@ from typing import Any, Callable, Iterable, Optional, TypeVar -R = TypeVar('R') -S = 
TypeVar('S') -T = TypeVar('T') -A = TypeVar('A') +R = TypeVar("R") +S = TypeVar("S") +T = TypeVar("T") +A = TypeVar("A") pool_processes: Optional[int] = None @@ -21,10 +21,12 @@ _map_worker_state_fn: Any = None _map_worker_state_arg: Any = None + def _map_worker_init(*args: Any) -> None: global _map_worker_fn, _map_worker_state_fn, _map_worker_state_arg (_map_worker_fn, _map_worker_state_fn, _map_worker_state_arg) = args + # NOTE: the state argument is never passed by any caller, we only use it as a localized # cache for the created state in lieu of another global. it is effectively a global though. def _map_worker_step(arg: Any, state: Any = []) -> Any: @@ -35,8 +37,14 @@ def _map_worker_step(arg: Any, state: Any = []) -> Any: state.append(_map_worker_state_fn(_map_worker_state_arg)) return _map_worker_fn(state[0], arg) -def map(fn: Callable[[S, T], R], d: Iterable[T], chunk_size: int, - state_fn: Callable[[A], S], state_arg: A) -> list[R]: + +def map( + fn: Callable[[S, T], R], + d: Iterable[T], + chunk_size: int, + state_fn: Callable[[A], S], + state_arg: A, +) -> list[R]: """ `[ fn(state, i) for i in d ]` where `state = state_fn(state_arg)`, but using multiprocessing if `pool_processes` is not `None`. when using multiprocessing is used the state function will @@ -53,6 +61,8 @@ def map(fn: Callable[[S, T], R], d: Iterable[T], chunk_size: int, """ if pool_processes is None: state = state_fn(state_arg) - return [ fn(state, i) for i in d ] - with multiprocessing.Pool(pool_processes, _map_worker_init, (fn, state_fn, state_arg)) as p: + return [fn(state, i) for i in d] + with multiprocessing.Pool( + pool_processes, _map_worker_init, (fn, state_fn, state_arg) + ) as p: return list(p.imap(_map_worker_step, d, chunk_size)) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/redirects.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/redirects.py index e8ddfee895ef1..96b9eccb1e4c8 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/redirects.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/redirects.py @@ -12,11 +12,13 @@ def __init__( divergent_redirects: set[str] = None, identifiers_missing_current_outpath: set[str] = None, identifiers_without_redirects: set[str] = None, - orphan_identifiers: set[str] = None + orphan_identifiers: set[str] = None, ): self.conflicting_anchors = conflicting_anchors or set() self.divergent_redirects = divergent_redirects or set() - self.identifiers_missing_current_outpath = identifiers_missing_current_outpath or set() + self.identifiers_missing_current_outpath = ( + identifiers_missing_current_outpath or set() + ) self.identifiers_without_redirects = identifiers_without_redirects or set() self.orphan_identifiers = orphan_identifiers or set() @@ -53,7 +55,11 @@ def __str__(self): error_messages.append(f""" Keys of the redirects mapping must correspond to some identifier in the source. - {"\n - ".join(self.orphan_identifiers)}""") - if self.identifiers_without_redirects or self.orphan_identifiers or self.identifiers_missing_current_outpath: + if ( + self.identifiers_without_redirects + or self.orphan_identifiers + or self.identifiers_missing_current_outpath + ): error_messages.append(f""" This can happen when an identifier was added, renamed, or removed. 
@@ -77,7 +83,9 @@ def __str__(self): NixOS: $ nix-shell nixos/doc/manual """) - error_messages.append("NOTE: If your build passes locally and you see this message in CI, you probably need a rebase.") + error_messages.append( + "NOTE: If your build passes locally and you see this message in CI, you probably need a rebase." + ) return "\n".join(error_messages) @@ -100,7 +108,13 @@ def validate(self, initial_xref_targets: dict[str, XrefTarget]): - The first element of an identifier's redirects list must denote its current location. """ xref_targets = {} - ignored_identifier_patterns = ("opt-", "auto-generated-", "function-library-", "service-opt-", "systemd-service-opt") + ignored_identifier_patterns = ( + "opt-", + "auto-generated-", + "function-library-", + "service-opt-", + "systemd-service-opt", + ) for id, target in initial_xref_targets.items(): # filter out automatically generated identifiers from module options and library documentation if id.startswith(ignored_identifier_patterns): @@ -120,18 +134,23 @@ def validate(self, initial_xref_targets: dict[str, XrefTarget]): if identifier not in xref_targets: continue - if not locations or locations[0] != f"{xref_targets[identifier].path}#{identifier}": + if ( + not locations + or locations[0] != f"{xref_targets[identifier].path}#{identifier}" + ): identifiers_missing_current_outpath.add(identifier) for location in locations[1:]: - if '#' in location: - path, anchor = location.split('#') + if "#" in location: + path, anchor = location.split("#") if anchor in identifiers_without_redirects: identifiers_without_redirects.remove(anchor) if location not in client_side_redirects: - client_side_redirects[location] = f"{xref_targets[identifier].path}#{identifier}" + client_side_redirects[location] = ( + f"{xref_targets[identifier].path}#{identifier}" + ) for identifier, xref_target in xref_targets.items(): if xref_target.path == path and anchor == identifier: conflicting_anchors.add(anchor) @@ -143,31 +162,35 @@ def validate(self, initial_xref_targets: dict[str, XrefTarget]): else: divergent_redirects.add(location) - if any([ - conflicting_anchors, - divergent_redirects, - identifiers_missing_current_outpath, - identifiers_without_redirects, - orphan_identifiers - ]): + if any( + [ + conflicting_anchors, + divergent_redirects, + identifiers_missing_current_outpath, + identifiers_without_redirects, + orphan_identifiers, + ] + ): raise RedirectsError( conflicting_anchors=conflicting_anchors, divergent_redirects=divergent_redirects, identifiers_missing_current_outpath=identifiers_missing_current_outpath, identifiers_without_redirects=identifiers_without_redirects, - orphan_identifiers=orphan_identifiers + orphan_identifiers=orphan_identifiers, ) self._xref_targets = xref_targets def get_client_redirects(self, target: str): - paths_to_target = {src for src, dest in self.get_server_redirects().items() if dest == target} + paths_to_target = { + src for src, dest in self.get_server_redirects().items() if dest == target + } client_redirects = {} for locations in self._raw_redirects.values(): for location in locations[1:]: - if '#' not in location: + if "#" not in location: continue - path, anchor = location.split('#') + path, anchor = location.split("#") if path not in [target, *paths_to_target]: continue client_redirects[anchor] = locations[0] @@ -177,10 +200,12 @@ def get_server_redirects(self): server_redirects = {} for identifier, locations in self._raw_redirects.items(): for location in locations[1:]: - if '#' not in location and location not in 
server_redirects: + if "#" not in location and location not in server_redirects: server_redirects[location] = self._xref_targets[identifier].path return server_redirects def get_redirect_script(self, target: str) -> str: client_redirects = self.get_client_redirects(target) - return self._redirects_script.replace('REDIRECTS_PLACEHOLDER', json.dumps(client_redirects)) + return self._redirects_script.replace( + "REDIRECTS_PLACEHOLDER", json.dumps(client_redirects) + ) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/types.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/types.py index b5c6e91a9b031..560f84a6cf503 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/types.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/types.py @@ -7,13 +7,16 @@ OptionLoc = str | dict[str, str] Option = dict[str, str | dict[str, str] | list[OptionLoc]] + class RenderedOption(NamedTuple): loc: list[str] lines: list[str] links: Optional[list[str]] = None + RenderFn = Callable[[Token, Sequence[Token], int], str] + class AnchorStyle(Enum): NONE = "none" LEGACY = "legacy" diff --git a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/utils.py b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/utils.py index 3377d1fa4fe18..aa0d2fe8c5c60 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/utils.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/nixos_render_docs/utils.py @@ -2,6 +2,7 @@ _frozen_classes: dict[type, type] = {} + # make a derived class freezable (ie, disallow modifications). # we do this by changing the class of an instance at runtime when freeze() # is called, providing a derived class that is exactly the same except @@ -12,10 +13,16 @@ class Freezeable: def freeze(self) -> None: cls = type(self) if not (frozen := _frozen_classes.get(cls)): + def __setattr__(instance: Any, n: str, v: Any) -> None: - raise TypeError(f'{cls.__name__} is frozen') - frozen = type(cls.__name__, (cls,), { - '__setattr__': __setattr__, - }) + raise TypeError(f"{cls.__name__} is frozen") + + frozen = type( + cls.__name__, + (cls,), + { + "__setattr__": __setattr__, + }, + ) _frozen_classes[cls] = frozen self.__class__ = frozen diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_asciidoc.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_asciidoc.py index 3cf5b208f3923..46516b28a78cc 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_asciidoc.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_asciidoc.py @@ -2,15 +2,18 @@ from sample_md import sample1 + class Converter(nrd.md.Converter[nrd.asciidoc.AsciiDocRenderer]): def __init__(self, manpage_urls: dict[str, str]): super().__init__() self._renderer = nrd.asciidoc.AsciiDocRenderer(manpage_urls) + def test_lists() -> None: c = Converter({}) # attaching to the nth ancestor list requires n newlines before the + - assert c._render("""\ + assert ( + c._render("""\ - a b @@ -21,7 +24,8 @@ def test_lists() -> None: 1 f -""") == """\ +""") + == """\ [] * {empty}a + @@ -41,10 +45,14 @@ def test_lists() -> None: + f """ + ) + def test_full() -> None: - c = Converter({ 'man(1)': 'http://example.org' }) - assert c._render(sample1) == """\ + c = Converter({"man(1)": "http://example.org"}) + assert ( + c._render(sample1) + == """\ [WARNING] ==== foo @@ -143,3 +151,4 @@ def test_full() -> None: more stuff in same deflist:: {empty}foo """ + ) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_auto_id_prefix.py 
b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_auto_id_prefix.py index 6fb706bad5ac1..3581bc3d76b73 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_auto_id_prefix.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_auto_id_prefix.py @@ -4,7 +4,9 @@ from nixos_render_docs.manual import HTMLConverter, HTMLParameters from nixos_render_docs.md import Converter -auto_id_prefix="TEST_PREFIX" +auto_id_prefix = "TEST_PREFIX" + + def set_prefix(token: Token, ident: str) -> None: token.attrs["id"] = f"{auto_id_prefix}-{ident}" @@ -24,10 +26,7 @@ def test_auto_id_prefix_simple() -> None: {**token.attrs, "tag": token.tag} for token in tokens if token.type == "heading_open" - ] == [ - {"id": "TEST_PREFIX-1", "tag": "h1"}, - {"id": "TEST_PREFIX-1.1", "tag": "h2"} - ] + ] == [{"id": "TEST_PREFIX-1", "tag": "h1"}, {"id": "TEST_PREFIX-1.1", "tag": "h2"}] def test_auto_id_prefix_repeated() -> None: @@ -56,6 +55,7 @@ def test_auto_id_prefix_repeated() -> None: {"id": "TEST_PREFIX-2.1", "tag": "h2"}, ] + def test_auto_id_prefix_maximum_nested() -> None: md = HTMLConverter("1.0.0", HTMLParameters("", [], [], 2, 2, 2, Path("")), {}) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_commonmark.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_commonmark.py index 4ff0bc3095c3d..d19bb227c6973 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_commonmark.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_commonmark.py @@ -10,9 +10,11 @@ def __init__(self, manpage_urls: Mapping[str, str]): super().__init__() self._renderer = nrd.commonmark.CommonMarkRenderer(manpage_urls) + # NOTE: in these tests we represent trailing spaces by ` ` and replace them with real space later, # since a number of editors will strip trailing whitespace on save and that would break the tests. + def test_indented_fence() -> None: c = Converter({}) s = """\ @@ -21,12 +23,15 @@ def test_indented_fence() -> None: >       > rest > ```\ -""".replace(' ', ' ') +""".replace(" ", " ") assert c._render(s) == s + def test_full() -> None: - c = Converter({ 'man(1)': 'http://example.org' }) - assert c._render(sample1) == """\ + c = Converter({"man(1)": "http://example.org"}) + assert ( + c._render(sample1) + == """\ **Warning:** foo **Note:** nested @@ -90,10 +95,12 @@ def test_full() -> None: - *‌more stuff in same deflist‌*     - foo""".replace(' ', ' ') + foo""".replace(" ", " ") + ) + def test_images() -> None: c = Converter({}) - assert c._render("![*alt text*](foo \"title \\\"quoted\\\" text\")") == ( - "![*alt text*](foo \"title \\\"quoted\\\" text\")" + assert c._render('![*alt text*](foo "title \\"quoted\\" text")') == ( + '![*alt text*](foo "title \\"quoted\\" text")' ) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_headings.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_headings.py index d2f7c5cbe69ec..0b4dd5c250f84 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_headings.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_headings.py @@ -2,103 +2,466 @@ from markdown_it.token import Token + class Converter(nrd.md.Converter[nrd.html.HTMLRenderer]): # actual renderer doesn't matter, we're just parsing. 
def __init__(self, manpage_urls: dict[str, str]) -> None: super().__init__() self._renderer = nrd.html.HTMLRenderer(manpage_urls, {}) + def test_heading_id_absent() -> None: c = Converter({}) assert c._parse("# foo") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo', markup='', info='', meta={}, block=False, hidden=False) - ], - content='foo', markup='', info='', meta={}, block=True, hidden=False), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + content="foo", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="heading_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_heading_id_present() -> None: c = Converter({}) assert c._parse("# foo {#foo}\n## bar { #bar}\n### bal { #bal} ") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'foo'}, map=[0, 1], level=0, - children=None, content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='foo {#foo}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='heading_open', tag='h2', nesting=1, attrs={'id': 'bar'}, map=[1, 2], level=0, - children=None, content='', markup='##', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=1, - content='bar { #bar}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='bar', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h2', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='##', info='', meta={}, block=True, hidden=False), - Token(type='heading_open', tag='h3', nesting=1, attrs={'id': 'bal'}, map=[2, 3], level=0, - children=None, content='', markup='###', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[2, 3], level=1, - content='bal { #bal}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, 
children=None, - content='bal', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h3', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='###', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={"id": "foo"}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="foo {#foo}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="heading_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="heading_open", + tag="h2", + nesting=1, + attrs={"id": "bar"}, + map=[1, 2], + level=0, + children=None, + content="", + markup="##", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[1, 2], + level=1, + content="bar { #bar}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="bar", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="heading_close", + tag="h2", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="##", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="heading_open", + tag="h3", + nesting=1, + attrs={"id": "bal"}, + map=[2, 3], + level=0, + children=None, + content="", + markup="###", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[2, 3], + level=1, + content="bal { #bal}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="bal", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="heading_close", + tag="h3", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="###", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_heading_id_incomplete() -> None: c = Converter({}) assert c._parse("# foo {#}") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='foo {#}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo {#}', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + 
block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="foo {#}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo {#}", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="heading_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_heading_id_double() -> None: c = Converter({}) assert c._parse("# foo {#a} {#b}") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'b'}, map=[0, 1], level=0, - children=None, content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='foo {#a} {#b}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo {#a}', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={"id": "b"}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="foo {#a} {#b}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo {#a}", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="heading_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_heading_id_suffixed() -> None: c = Converter({}) assert c._parse("# foo {#a} s") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, - children=None, content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='foo {#a} s', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo {#a} s', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="foo {#a} s", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo {#a} s", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + 
type="heading_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), ] diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_html.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_html.py index 9a3e07cb24c7a..5627366419f46 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_html.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_html.py @@ -4,17 +4,25 @@ from sample_md import sample1 + class Renderer(nrd.html.HTMLRenderer): def _pull_image(self, src: str) -> str: return src + class Converter(nrd.md.Converter[nrd.html.HTMLRenderer]): - def __init__(self, manpage_urls: dict[str, str], xrefs: dict[str, nrd.manual_structure.XrefTarget]): + def __init__( + self, + manpage_urls: dict[str, str], + xrefs: dict[str, nrd.manual_structure.XrefTarget], + ): super().__init__() self._renderer = Renderer(manpage_urls, xrefs) + def unpretty(s: str) -> str: - return "".join(map(str.strip, s.splitlines())).replace('␣', ' ').replace('↵', '\n') + return "".join(map(str.strip, s.splitlines())).replace("␣", " ").replace("↵", "\n") + def test_lists_styles() -> None: # nested lists rotate through a number of list style @@ -62,21 +70,36 @@ def test_lists_styles() -> None: """) + def test_xrefs() -> None: # nested lists rotate through a number of list style - c = Converter({}, { - 'foo': nrd.manual_structure.XrefTarget('foo', '
    ', 'toc1', 'title1', 'index.html'), - 'bar': nrd.manual_structure.XrefTarget('bar', '
    ', 'toc2', 'title2', 'index.html', True), - }) - assert c._render("[](#foo)") == '


    ' - assert c._render("[](#bar)") == '


    ' + c = Converter( + {}, + { + "foo": nrd.manual_structure.XrefTarget( + "foo", "
    ", "toc1", "title1", "index.html" + ), + "bar": nrd.manual_structure.XrefTarget( + "bar", "
    ", "toc2", "title2", "index.html", True + ), + }, + ) + assert ( + c._render("[](#foo)") + == '


    ' + ) + assert ( + c._render("[](#bar)") + == '


    ' + ) with pytest.raises(nrd.html.UnresolvedXrefError) as exc: c._render("[](#baz)") - assert exc.value.args[0] == 'bad local reference, id #baz not known' + assert exc.value.args[0] == "bad local reference, id #baz not known" + def test_images() -> None: c = Converter({}, {}) - assert c._render("![*alt text*](foo \"title text\")") == unpretty(""" + assert c._render('![*alt text*](foo "title text")') == unpretty("""

    *alt text* @@ -84,13 +107,16 @@ def test_images() -> None:

    """) + def test_tables() -> None: c = Converter({}, {}) - assert c._render(textwrap.dedent(""" + assert c._render( + textwrap.dedent(""" | d | l | m | r | |---|:--|:-:|--:| | a | b | c | d | - """)) == unpretty(""" + """) + ) == unpretty("""
    @@ -119,17 +145,27 @@ def test_tables() -> None: """) + def test_footnotes() -> None: - c = Converter({}, { - "bar": nrd.manual_structure.XrefTarget("bar", "", None, None, ""), - "bar.__back.0": nrd.manual_structure.XrefTarget("bar.__back.0", "", None, None, ""), - "bar.__back.1": nrd.manual_structure.XrefTarget("bar.__back.1", "", None, None, ""), - }) - assert c._render(textwrap.dedent(""" + c = Converter( + {}, + { + "bar": nrd.manual_structure.XrefTarget("bar", "", None, None, ""), + "bar.__back.0": nrd.manual_structure.XrefTarget( + "bar.__back.0", "", None, None, "" + ), + "bar.__back.1": nrd.manual_structure.XrefTarget( + "bar.__back.1", "", None, None, "" + ), + }, + ) + assert c._render( + textwrap.dedent(""" foo [^bar] baz [^bar] [^bar]: note - """)) == unpretty(""" + """) + ) == unpretty("""

    foo [1]␣ baz [1] @@ -146,8 +182,9 @@ def test_footnotes() -> None: """) + def test_full() -> None: - c = Converter({ 'man(1)': 'http://example.org' }, {}) + c = Converter({"man(1)": "http://example.org"}, {}) assert c._render(sample1) == unpretty("""

    Warning

    diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_lists.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_lists.py index 26632c276b96e..00904612bb4fb 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_lists.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_lists.py @@ -3,186 +3,1069 @@ from markdown_it.token import Token + class Converter(nrd.md.Converter[nrd.html.HTMLRenderer]): # actual renderer doesn't matter, we're just parsing. def __init__(self, manpage_urls: dict[str, str]) -> None: super().__init__() self._renderer = nrd.html.HTMLRenderer(manpage_urls, {}) + @pytest.mark.parametrize("ordered", [True, False]) def test_list_wide(ordered: bool) -> None: t, tag, m, e1, e2, i1, i2 = ( - ("ordered", "ol", ".", "1.", "2.", "1", "2") if ordered else ("bullet", "ul", "-", "-", "-", "", "") + ("ordered", "ol", ".", "1.", "2.", "1", "2") + if ordered + else ("bullet", "ul", "-", "-", "-", "", "") ) c = Converter({}) - meta = { 'end': int(e2[:-1]) } if ordered else {} - meta['compact'] = False + meta = {"end": int(e2[:-1])} if ordered else {} + meta["compact"] = False assert c._parse(f"{e1} a\n\n{e2} b") == [ - Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 3], level=0, - children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 2], level=1, children=None, - content='', markup=m, info=i1, meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=3, - content='a', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[2, 3], level=1, children=None, - content='', markup=m, info=i2, meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[2, 3], level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[2, 3], level=3, - content='b', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='b', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False) + Token( + type=f"{t}_list_open", + tag=tag, + nesting=1, + attrs={}, + map=[0, 3], + 
level=0, + children=None, + content="", + markup=m, + info="", + meta=meta, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[0, 2], + level=1, + children=None, + content="", + markup=m, + info=i1, + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=3, + content="a", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[2, 3], + level=1, + children=None, + content="", + markup=m, + info=i2, + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[2, 3], + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[2, 3], + level=3, + content="b", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="b", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type=f"{t}_list_close", + tag=tag, + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), ] + @pytest.mark.parametrize("ordered", [True, False]) def test_list_narrow(ordered: bool) -> None: t, tag, m, e1, e2, i1, i2 = ( - ("ordered", "ol", ".", "1.", "2.", "1", "2") if ordered else ("bullet", "ul", "-", "-", "-", "", "") + ("ordered", "ol", ".", "1.", "2.", "1", "2") + if ordered + else ("bullet", "ul", "-", "-", "-", "", "") ) c = Converter({}) - meta = { 'end': int(e2[:-1]) } if ordered else {} - meta['compact'] = True + meta = {"end": int(e2[:-1])} if ordered else {} + meta["compact"] = True assert c._parse(f"{e1} a\n{e2} b") == [ - Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 2], level=0, - children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None, - content='', markup=m, info=i1, meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', 
nesting=1, attrs={}, map=[0, 1], level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=3, - content='a', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None, - content='', markup=m, info=i2, meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=3, - content='b', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='b', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False) + Token( + type=f"{t}_list_open", + tag=tag, + nesting=1, + attrs={}, + map=[0, 2], + level=0, + children=None, + content="", + markup=m, + info="", + meta=meta, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[0, 1], + level=1, + children=None, + content="", + markup=m, + info=i1, + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=3, + content="a", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[1, 2], + level=1, + children=None, + content="", + markup=m, + info=i2, + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[1, 
2], + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[1, 2], + level=3, + content="b", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="b", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type=f"{t}_list_close", + tag=tag, + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse(f"{e1} - a\n{e2} b") == [ - Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 2], level=0, - children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None, - content='', markup=m, info=i1, meta={}, block=True, hidden=False), - Token(type='bullet_list_open', tag='ul', nesting=1, attrs={}, map=[0, 1], level=2, - children=None, content='', markup='-', info='', meta={'compact': True}, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=3, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=4, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=5, - content='a', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None, - content='', markup=m, info=i2, meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=3, - content='b', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='b', 
markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False) + Token( + type=f"{t}_list_open", + tag=tag, + nesting=1, + attrs={}, + map=[0, 2], + level=0, + children=None, + content="", + markup=m, + info="", + meta=meta, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[0, 1], + level=1, + children=None, + content="", + markup=m, + info=i1, + meta={}, + block=True, + hidden=False, + ), + Token( + type="bullet_list_open", + tag="ul", + nesting=1, + attrs={}, + map=[0, 1], + level=2, + children=None, + content="", + markup="-", + info="", + meta={"compact": True}, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[0, 1], + level=3, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=4, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=5, + content="a", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=4, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=3, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="bullet_list_close", + tag="ul", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[1, 2], + level=1, + children=None, + content="", + markup=m, + info=i2, + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[1, 2], + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[1, 2], + level=3, + content="b", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="b", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + 
attrs={}, + map=None, + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type=f"{t}_list_close", + tag=tag, + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse(f"{e1} - a\n{e2} - b") == [ - Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 2], level=0, - children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None, - content='', markup=m, info=i1, meta={}, block=True, hidden=False), - Token(type='bullet_list_open', tag='ul', nesting=1, attrs={}, map=[0, 1], level=2, - children=None, content='', markup='-', info='', meta={'compact': True}, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=3, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=4, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=5, - content='a', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None, - content='', markup=m, info=i2, meta={}, block=True, hidden=False), - Token(type='bullet_list_open', tag='ul', nesting=1, attrs={}, map=[1, 2], level=2, - children=None, content='', markup='-', info='', meta={'compact': True}, block=True, hidden=False), - Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=3, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=4, children=None, - content='', markup='', info='', meta={}, block=True, hidden=True), - Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=5, - content='b', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='b', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None, - content='', markup='', info='', 
meta={}, block=True, hidden=True), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None, - content='', markup='-', info='', meta={}, block=True, hidden=False), - Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False), - Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup=m, info='', meta={}, block=True, hidden=False) + Token( + type=f"{t}_list_open", + tag=tag, + nesting=1, + attrs={}, + map=[0, 2], + level=0, + children=None, + content="", + markup=m, + info="", + meta=meta, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[0, 1], + level=1, + children=None, + content="", + markup=m, + info=i1, + meta={}, + block=True, + hidden=False, + ), + Token( + type="bullet_list_open", + tag="ul", + nesting=1, + attrs={}, + map=[0, 1], + level=2, + children=None, + content="", + markup="-", + info="", + meta={"compact": True}, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[0, 1], + level=3, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=4, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=5, + content="a", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=4, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=3, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="bullet_list_close", + tag="ul", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[1, 2], + level=1, + children=None, + content="", + markup=m, + info=i2, + meta={}, + block=True, + hidden=False, + ), + Token( + type="bullet_list_open", + tag="ul", + nesting=1, + attrs={}, + map=[1, 2], + level=2, + children=None, + content="", + markup="-", + info="", + meta={"compact": True}, + block=True, + hidden=False, + ), + Token( + type="list_item_open", + tag="li", + nesting=1, + attrs={}, + map=[1, 2], + level=3, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + 
map=[1, 2], + level=4, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[1, 2], + level=5, + content="b", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="b", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=4, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=True, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=3, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="bullet_list_close", + tag="ul", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="-", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="list_item_close", + tag="li", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type=f"{t}_list_close", + tag=tag, + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=m, + info="", + meta={}, + block=True, + hidden=False, + ), ] diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_manpage.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_manpage.py index b6e4a94ef1c67..e1448e9fce1e9 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_manpage.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_manpage.py @@ -6,39 +6,51 @@ class Converter(nrd.md.Converter[nrd.manpage.ManpageRenderer]): - def __init__(self, manpage_urls: Mapping[str, str], options_by_id: dict[str, str] = {}): + def __init__( + self, manpage_urls: Mapping[str, str], options_by_id: dict[str, str] = {} + ): super().__init__() self._renderer = nrd.manpage.ManpageRenderer(manpage_urls, options_by_id) + def test_inline_code() -> None: c = Converter({}) assert c._render("1 `x a x` 2") == "1 \\fR\\(oqx a x\\(cq\\fP 2" + def test_fonts() -> None: c = Converter({}) assert c._render("*a **b** c*") == "\\fIa \\fBb\\fI c\\fR" assert c._render("*a [1 `2`](3) c*") == "\\fIa \\fB1 \\fR\\(oq2\\(cq\\fP\\fI c\\fR" + def test_expand_link_targets() -> None: - c = Converter({}, { '#foo1': "bar", "#foo2": "bar" }) - assert (c._render("[a](#foo1) [](#foo2) [b](#bar1) [](#bar2)") == - "\\fBa\\fR \\fBbar\\fR \\fBb\\fR \\fB\\fR") + c = Converter({}, {"#foo1": "bar", "#foo2": "bar"}) + assert ( + c._render("[a](#foo1) [](#foo2) [b](#bar1) [](#bar2)") + == "\\fBa\\fR \\fBbar\\fR \\fBb\\fR \\fB\\fR" + ) + def test_collect_links() -> None: - c = Converter({}, { '#foo': "bar" }) + c = Converter({}, {"#foo": "bar"}) c._renderer.link_footnotes = [] assert c._render("[a](link1) [b](link2)") == "\\fBa\\fR[1]\\fR \\fBb\\fR[2]\\fR" - assert c._renderer.link_footnotes == ['link1', 'link2'] + assert c._renderer.link_footnotes == ["link1", "link2"] + def test_dedup_links() -> None: - c = Converter({}, { '#foo': "bar" }) + c = Converter({}, {"#foo": "bar"}) c._renderer.link_footnotes = [] assert c._render("[a](link) [b](link)") == "\\fBa\\fR[1]\\fR \\fBb\\fR[1]\\fR" - assert c._renderer.link_footnotes == ['link'] + assert c._renderer.link_footnotes == ["link"] + def test_full() -> None: - c = Converter({ 
'man(1)': 'http://example.org' }) - assert c._render(sample1) == """\ + c = Converter({"man(1)": "http://example.org"}) + assert ( + c._render(sample1) + == """\ .sp .RS 4 \\fBWarning\\fP @@ -167,3 +179,4 @@ def test_full() -> None: foo .RE .RE""" + ) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_options.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_options.py index 12639c0f30f87..570edb6679897 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_options.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_options.py @@ -6,34 +6,50 @@ import nixos_render_docs from nixos_render_docs.options import AnchorStyle + def test_option_headings() -> None: - c = nixos_render_docs.options.HTMLConverter({}, 'local', 'vars', 'opt-', {}) + c = nixos_render_docs.options.HTMLConverter({}, "local", "vars", "opt-", {}) with pytest.raises(RuntimeError) as exc: c._render("# foo") - assert exc.value.args[0] == 'md token not supported in options doc' + assert exc.value.args[0] == "md token not supported in options doc" assert exc.value.args[1] == Token( - type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False + type="heading_open", + tag="h1", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, ) + def test_options_commonmark() -> None: - c = nixos_render_docs.options.CommonMarkConverter({}, 'local') - with Path('tests/sample_options_simple.json').open() as f: + c = nixos_render_docs.options.CommonMarkConverter({}, "local") + with Path("tests/sample_options_simple.json").open() as f: opts = json.load(f) assert opts is not None - with Path('tests/sample_options_simple_default.md').open() as f: + with Path("tests/sample_options_simple_default.md").open() as f: expected = f.read() c.add_options(opts) s = c.finalize() assert s == expected + def test_options_commonmark_legacy_anchors() -> None: - c = nixos_render_docs.options.CommonMarkConverter({}, 'local', anchor_style = AnchorStyle.LEGACY, anchor_prefix = 'opt-') - with Path('tests/sample_options_simple.json').open() as f: + c = nixos_render_docs.options.CommonMarkConverter( + {}, "local", anchor_style=AnchorStyle.LEGACY, anchor_prefix="opt-" + ) + with Path("tests/sample_options_simple.json").open() as f: opts = json.load(f) assert opts is not None - with Path('tests/sample_options_simple_legacy.md').open() as f: + with Path("tests/sample_options_simple_legacy.md").open() as f: expected = f.read() c.add_options(opts) diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_plugins.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_plugins.py index c38f82afd67bc..85b7341a6034f 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_plugins.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_plugins.py @@ -6,347 +6,1793 @@ import nixos_render_docs as nrd from nixos_render_docs.src_error import SrcError + class Converter(nrd.md.Converter[nrd.html.HTMLRenderer]): # actual renderer doesn't matter, we're just parsing. 
def __init__(self, manpage_urls: dict[str, str]) -> None: super().__init__() self._renderer = nrd.html.HTMLRenderer(manpage_urls, {}) + def test_attr_span_parsing() -> None: c = Converter({}) assert c._parse("[]{#test}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[]{#test}', - markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'test'}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="[]{#test}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"id": "test"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("[]{.test}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[]{.test}', - markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'class': 'test'}, map=None, - level=0, children=None, content='', markup='', info='', meta={}, block=False, - hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="[]{.test}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"class": "test"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + 
hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("[]{.test1 .test2 #foo .test3 .test4}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='[]{.test1 .test2 #foo .test3 .test4}', - markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='attr_span_begin', tag='span', nesting=1, - attrs={'class': 'test1 test2 test3 test4', 'id': 'foo'}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="[]{.test1 .test2 #foo .test3 .test4}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"class": "test1 test2 test3 test4", "id": "foo"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("[]{#a #a}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='[]{#a #a}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='[]{#a #a}', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="[]{#a #a}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + 
Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="[]{#a #a}", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("[]{foo}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='[]{foo}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='[]{foo}', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="[]{foo}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="[]{foo}", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_attr_span_formatted() -> None: c = Converter({}) assert c._parse("a[b c `d` ***e***]{#test}f") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, - children=None, content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='a[b c `d` ***e***]{#test}f', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, - children=None, content='a', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'test'}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content='b c ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='code_inline', tag='code', nesting=0, attrs={}, map=None, level=1, - children=None, content='d', markup='`', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content=' ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=1, children=None, - content='', markup='*', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None, - content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='strong_open', tag='strong', nesting=1, attrs={}, map=None, level=2, - 
children=None, content='', markup='**', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=3, children=None, - content='e', markup='', info='', meta={}, block=False, hidden=False), - Token(type='strong_close', tag='strong', nesting=-1, attrs={}, map=None, level=2, - children=None, content='', markup='**', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None, - content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup='*', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='f', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="a[b c `d` ***e***]{#test}f", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"id": "test"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content="b c ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="code_inline", + tag="code", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content="d", + markup="`", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content=" ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="em_open", + tag="em", + nesting=1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup="*", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="strong_open", + tag="strong", + nesting=1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="**", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=3, + children=None, + content="e", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="strong_close", + tag="strong", + nesting=-1, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="**", + info="", + meta={}, + block=False, + hidden=False, + ), + 
Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=2, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="em_close", + tag="em", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup="*", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="f", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_attr_span_in_heading() -> None: c = Converter({}) # inline anchors in headers are allowed, but header attributes should be preferred assert c._parse("# foo []{#bar} baz") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='foo []{#bar} baz', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'bar'}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content=' baz', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="foo []{#bar} baz", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"id": "bar"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content=" baz", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="heading_close", + 
tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_attr_span_on_links() -> None: c = Converter({}) assert c._parse("[ [a](#bar) ]{#foo}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[ [a](#bar) ]{#foo}', - markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'foo'}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content=' ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='link_open', tag='a', nesting=1, attrs={'href': '#bar'}, map=None, level=1, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False), - Token(type='link_close', tag='a', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content=' ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="[ [a](#bar) ]{#foo}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"id": "foo"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content=" ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="link_open", + tag="a", + nesting=1, + attrs={"href": "#bar"}, + map=None, + level=1, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=2, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="link_close", + tag="a", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content=" ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + 
attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_attr_span_nested() -> None: # inline anchors may contain more anchors (even though this is a bit pointless) c = Converter({}) assert c._parse("[ [a]{#bar} ]{#foo}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='[ [a]{#bar} ]{#foo}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'foo'}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content=' ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'bar'}, map=None, level=1, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=1, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content=' ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="[ [a]{#bar} ]{#foo}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"id": "foo"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content=" ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"id": "bar"}, + map=None, + level=1, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=2, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup="", + info="", + meta={}, + 
block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content=" ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_attr_span_escaping() -> None: c = Converter({}) assert c._parse("\\[a]{#bar}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='[a]{#bar}', markup='\\[', info='escape', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="\\[a]{#bar}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="[a]{#bar}", + markup="\\[", + info="escape", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("\\\\[a]{#bar}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='\\\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='\\', markup='\\\\', info='escape', meta={}, block=False, hidden=False), - Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'bar'}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False), - Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + 
info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="\\\\[a]{#bar}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="\\", + markup="\\\\", + info="escape", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_begin", + tag="span", + nesting=1, + attrs={"id": "bar"}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="attr_span_end", + tag="span", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("\\\\\\[a]{#bar}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='\\[a]{#bar}', markup='\\\\', info='escape', meta={}, block=False, hidden=False) - ], - content='\\\\\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="\\[a]{#bar}", + markup="\\\\", + info="escape", + meta={}, + block=False, + hidden=False, + ) + ], + content="\\\\\\[a]{#bar}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_inline_comment_basic() -> None: c = Converter({}) assert c._parse("a b") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='a b', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a b', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + 
tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="a b", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a b", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("a") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='a', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="a", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_inline_comment_does_not_nest_in_code() -> None: c = Converter({}) assert c._parse("`ac`") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='`ac`', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='code_inline', tag='code', nesting=0, attrs={}, map=None, level=0, children=None, - content='ac', markup='`', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="`ac`", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="code_inline", + tag="code", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="ac", + markup="`", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + 
type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_inline_comment_does_not_nest_elsewhere() -> None: c = Converter({}) assert c._parse("*ac*") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='*ac*', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=0, children=None, - content='', markup='*', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content='ac', markup='', info='', meta={}, block=False, hidden=False), - Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='*', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="*ac*", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="em_open", + tag="em", + nesting=1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="*", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content="ac", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="em_close", + tag="em", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="*", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_inline_comment_can_be_escaped() -> None: c = Converter({}) assert c._parse("a\\c") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='a\\c', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='ac', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="a\\c", + markup="", + info="", + meta={}, + block=True, + hidden=False, + 
children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="ac", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("a\\\\c") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a\\c', markup='', info='', meta={}, block=False, hidden=False) - ], - content='a\\\\c', markup='', info='', meta={}, block=True, hidden=False), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a\\c", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + content="a\\\\c", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("a\\\\\\c") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='a\\c', markup='', info='', meta={}, block=False, hidden=False) - ], - content='a\\\\\\c', markup='', info='', meta={}, block=True, hidden=False), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="a\\c", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + content="a\\\\\\c", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_block_comment() -> None: c = Converter({}) assert c._parse("") == [] @@ -355,157 +1801,694 @@ def test_block_comment() -> None: assert c._parse("") == [] assert c._parse("") == [] + def 
test_heading_attributes() -> None: c = Converter({}) assert c._parse("# foo *bar* {#hid}") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'hid'}, map=[0, 1], level=0, - children=None, content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='foo *bar* {#hid}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo ', markup='', info='', meta={}, block=False, hidden=False), - Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=0, children=None, - content='', markup='*', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None, - content='bar', markup='', info='', meta={}, block=False, hidden=False), - Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='*', info='', meta={}, block=False, hidden=False), - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={"id": "hid"}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="foo *bar* {#hid}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo ", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="em_open", + tag="em", + nesting=1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="*", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=1, + children=None, + content="bar", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="em_close", + tag="em", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="*", + info="", + meta={}, + block=False, + hidden=False, + ), + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ), + ], + ), + Token( + type="heading_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("# foo--bar {#id-with--double-dashes}") == [ - Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'id-with--double-dashes'}, map=[0, 1], - level=0, children=None, content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='foo--bar {#id-with--double-dashes}', markup='', info='', meta={}, block=True, - hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo–bar', markup='', info='', meta={}, 
block=False, hidden=False) - ]), - Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False) + Token( + type="heading_open", + tag="h1", + nesting=1, + attrs={"id": "id-with--double-dashes"}, + map=[0, 1], + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="foo--bar {#id-with--double-dashes}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo–bar", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="heading_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_admonitions() -> None: c = Converter({}) assert c._parse("::: {.note}") == [ - Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0, - children=None, content='', markup=':::', info=' {.note}', meta={'kind': 'note'}, block=True, - hidden=False), - Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False) + Token( + type="admonition_open", + tag="div", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup=":::", + info=" {.note}", + meta={"kind": "note"}, + block=True, + hidden=False, + ), + Token( + type="admonition_close", + tag="div", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=":::", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("::: {.caution}") == [ - Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0, - children=None, content='', markup=':::', info=' {.caution}', meta={'kind': 'caution'}, - block=True, hidden=False), - Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False) + Token( + type="admonition_open", + tag="div", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup=":::", + info=" {.caution}", + meta={"kind": "caution"}, + block=True, + hidden=False, + ), + Token( + type="admonition_close", + tag="div", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=":::", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("::: {.tip}") == [ - Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0, - children=None, content='', markup=':::', info=' {.tip}', meta={'kind': 'tip'}, block=True, - hidden=False), - Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False) + Token( + type="admonition_open", + tag="div", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup=":::", + info=" {.tip}", + meta={"kind": "tip"}, + block=True, + hidden=False, + ), + Token( + type="admonition_close", + tag="div", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=":::", + info="", + 
meta={}, + block=True, + hidden=False, + ), ] assert c._parse("::: {.important}") == [ - Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0, - children=None, content='', markup=':::', info=' {.important}', meta={'kind': 'important'}, - block=True, hidden=False), - Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False) + Token( + type="admonition_open", + tag="div", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup=":::", + info=" {.important}", + meta={"kind": "important"}, + block=True, + hidden=False, + ), + Token( + type="admonition_close", + tag="div", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=":::", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("::: {.warning}") == [ - Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0, - children=None, content='', markup=':::', info=' {.warning}', meta={'kind': 'warning'}, - block=True, hidden=False), - Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0, - children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False) + Token( + type="admonition_open", + tag="div", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup=":::", + info=" {.warning}", + meta={"kind": "warning"}, + block=True, + hidden=False, + ), + Token( + type="admonition_close", + tag="div", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup=":::", + info="", + meta={}, + block=True, + hidden=False, + ), ] + def test_example() -> None: c = Converter({}) assert c._parse("::: {.example}\n# foo") == [ - Token(type='example_open', tag='div', nesting=1, attrs={}, map=[0, 2], level=0, children=None, - content='', markup=':::', info=' {.example}', meta={}, block=True, hidden=False), - Token(type='example_title_open', tag='h1', nesting=1, attrs={}, map=[1, 2], level=1, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=2, - content='foo', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='example_title_close', tag='h1', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='example_close', tag='div', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="example_open", + tag="div", + nesting=1, + attrs={}, + map=[0, 2], + level=0, + children=None, + content="", + markup=":::", + info=" {.example}", + meta={}, + block=True, + hidden=False, + ), + Token( + type="example_title_open", + tag="h1", + nesting=1, + attrs={}, + map=[1, 2], + level=1, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[1, 2], + level=2, + content="foo", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + 
content="foo", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="example_title_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="example_close", + tag="div", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("::: {#eid .example}\n# foo") == [ - Token(type='example_open', tag='div', nesting=1, attrs={'id': 'eid'}, map=[0, 2], level=0, - children=None, content='', markup=':::', info=' {#eid .example}', meta={}, block=True, - hidden=False), - Token(type='example_title_open', tag='h1', nesting=1, attrs={}, map=[1, 2], level=1, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=2, - content='foo', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='foo', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='example_title_close', tag='h1', nesting=-1, attrs={}, map=None, level=1, children=None, - content='', markup='#', info='', meta={}, block=True, hidden=False), - Token(type='example_close', tag='div', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="example_open", + tag="div", + nesting=1, + attrs={"id": "eid"}, + map=[0, 2], + level=0, + children=None, + content="", + markup=":::", + info=" {#eid .example}", + meta={}, + block=True, + hidden=False, + ), + Token( + type="example_title_open", + tag="h1", + nesting=1, + attrs={}, + map=[1, 2], + level=1, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[1, 2], + level=2, + content="foo", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="foo", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="example_title_close", + tag="h1", + nesting=-1, + attrs={}, + map=None, + level=1, + children=None, + content="", + markup="#", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="example_close", + tag="div", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("::: {.example .note}") == [ - Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, - content='::: {.example .note}', markup='', info='', meta={}, block=True, hidden=False, - children=[ - Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None, - content='::: {.example .note}', markup='', info='', meta={}, block=False, hidden=False) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None, - content='', markup='', info='', meta={}, block=True, hidden=False) + Token( + type="paragraph_open", + 
tag="p", + nesting=1, + attrs={}, + map=[0, 1], + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + attrs={}, + map=[0, 1], + level=1, + content="::: {.example .note}", + markup="", + info="", + meta={}, + block=True, + hidden=False, + children=[ + Token( + type="text", + tag="", + nesting=0, + attrs={}, + map=None, + level=0, + children=None, + content="::: {.example .note}", + markup="", + info="", + meta={}, + block=False, + hidden=False, + ) + ], + ), + Token( + type="paragraph_close", + tag="p", + nesting=-1, + attrs={}, + map=None, + level=0, + children=None, + content="", + markup="", + info="", + meta={}, + block=True, + hidden=False, + ), ] assert c._parse("::: {.example}\n### foo: `code`\nbar\n:::\nbaz") == [ - Token(type='example_open', tag='div', nesting=1, map=[0, 3], markup=':::', info=' {.example}', - block=True), - Token(type='example_title_open', tag='h3', nesting=1, map=[1, 2], level=1, markup='###', block=True), - Token(type='inline', tag='', nesting=0, map=[1, 2], level=2, content='foo: `code`', block=True, - children=[ - Token(type='text', tag='', nesting=0, content='foo: '), - Token(type='code_inline', tag='code', nesting=0, content='code', markup='`') - ]), - Token(type='example_title_close', tag='h3', nesting=-1, level=1, markup='###', block=True), - Token(type='paragraph_open', tag='p', nesting=1, map=[2, 3], level=1, block=True), - Token(type='inline', tag='', nesting=0, map=[2, 3], level=2, content='bar', block=True, - children=[ - Token(type='text', tag='', nesting=0, content='bar') - ]), - Token(type='paragraph_close', tag='p', nesting=-1, level=1, block=True), - Token(type='example_close', tag='div', nesting=-1, markup=':::', block=True), - Token(type='paragraph_open', tag='p', nesting=1, map=[4, 5], block=True), - Token(type='inline', tag='', nesting=0, map=[4, 5], level=1, content='baz', block=True, - children=[ - Token(type='text', tag='', nesting=0, content='baz') - ]), - Token(type='paragraph_close', tag='p', nesting=-1, block=True) + Token( + type="example_open", + tag="div", + nesting=1, + map=[0, 3], + markup=":::", + info=" {.example}", + block=True, + ), + Token( + type="example_title_open", + tag="h3", + nesting=1, + map=[1, 2], + level=1, + markup="###", + block=True, + ), + Token( + type="inline", + tag="", + nesting=0, + map=[1, 2], + level=2, + content="foo: `code`", + block=True, + children=[ + Token(type="text", tag="", nesting=0, content="foo: "), + Token( + type="code_inline", + tag="code", + nesting=0, + content="code", + markup="`", + ), + ], + ), + Token( + type="example_title_close", + tag="h3", + nesting=-1, + level=1, + markup="###", + block=True, + ), + Token( + type="paragraph_open", tag="p", nesting=1, map=[2, 3], level=1, block=True + ), + Token( + type="inline", + tag="", + nesting=0, + map=[2, 3], + level=2, + content="bar", + block=True, + children=[Token(type="text", tag="", nesting=0, content="bar")], + ), + Token(type="paragraph_close", tag="p", nesting=-1, level=1, block=True), + Token(type="example_close", tag="div", nesting=-1, markup=":::", block=True), + Token(type="paragraph_open", tag="p", nesting=1, map=[4, 5], block=True), + Token( + type="inline", + tag="", + nesting=0, + map=[4, 5], + level=1, + content="baz", + block=True, + children=[Token(type="text", tag="", nesting=0, content="baz")], + ), + Token(type="paragraph_close", tag="p", nesting=-1, block=True), ] with pytest.raises(SrcError) as exc: 
c._parse("::: {.example}\n### foo\n### bar\n:::") - assert str(exc.value) == textwrap.dedent( - """ + assert ( + str(exc.value) + == textwrap.dedent( + """ unexpected non-title heading in `:::{.example}`; are you missing a `:::`? Note: blocks like `:::{.example}` are only allowed to contain a single heading in order to simplify TOC generation. @@ -521,29 +2504,68 @@ def test_example() -> None: \x1b[2m\x1b[37m 3\x1b[0m \x1b[1m\x1b[33m┃\x1b[0m ### bar\x1b[0m \x1b[2m\x1b[37m 4\x1b[0m \x1b[2m\x1b[37m┆ :::\x1b[0m """ - ).strip() + ).strip() + ) + def test_footnotes() -> None: c = Converter({}) assert c._parse("text [^foo]\n\n[^foo]: bar") == [ - Token(type='paragraph_open', tag='p', nesting=1, map=[0, 1], block=True), - Token(type='inline', tag='', nesting=0, map=[0, 1], level=1, content='text [^foo]', block=True, - children=[ - Token(type='text', tag='', nesting=0, content='text '), - Token(type='footnote_ref', tag='', nesting=0, attrs={'id': 'foo.__back.0'}, - meta={'id': 0, 'subId': 0, 'label': 'foo', 'target': 'foo'}) - ]), - Token(type='paragraph_close', tag='p', nesting=-1, block=True), - Token(type='footnote_block_open', tag='', nesting=1), - Token(type='footnote_open', tag='', nesting=1, attrs={'id': 'foo'}, meta={'id': 0, 'label': 'foo'}), - Token(type='paragraph_open', tag='p', nesting=1, map=[2, 3], level=1, block=True, hidden=False), - Token(type='inline', tag='', nesting=0, map=[2, 3], level=2, content='bar', block=True, - children=[ - Token(type='text', tag='', nesting=0, content='bar') - ]), - Token(type='footnote_anchor', tag='', nesting=0, - meta={'id': 0, 'label': 'foo', 'subId': 0, 'target': 'foo.__back.0'}), - Token(type='paragraph_close', tag='p', nesting=-1, level=1, block=True), - Token(type='footnote_close', tag='', nesting=-1), - Token(type='footnote_block_close', tag='', nesting=-1), + Token(type="paragraph_open", tag="p", nesting=1, map=[0, 1], block=True), + Token( + type="inline", + tag="", + nesting=0, + map=[0, 1], + level=1, + content="text [^foo]", + block=True, + children=[ + Token(type="text", tag="", nesting=0, content="text "), + Token( + type="footnote_ref", + tag="", + nesting=0, + attrs={"id": "foo.__back.0"}, + meta={"id": 0, "subId": 0, "label": "foo", "target": "foo"}, + ), + ], + ), + Token(type="paragraph_close", tag="p", nesting=-1, block=True), + Token(type="footnote_block_open", tag="", nesting=1), + Token( + type="footnote_open", + tag="", + nesting=1, + attrs={"id": "foo"}, + meta={"id": 0, "label": "foo"}, + ), + Token( + type="paragraph_open", + tag="p", + nesting=1, + map=[2, 3], + level=1, + block=True, + hidden=False, + ), + Token( + type="inline", + tag="", + nesting=0, + map=[2, 3], + level=2, + content="bar", + block=True, + children=[Token(type="text", tag="", nesting=0, content="bar")], + ), + Token( + type="footnote_anchor", + tag="", + nesting=0, + meta={"id": 0, "label": "foo", "subId": 0, "target": "foo.__back.0"}, + ), + Token(type="paragraph_close", tag="p", nesting=-1, level=1, block=True), + Token(type="footnote_close", tag="", nesting=-1), + Token(type="footnote_block_close", tag="", nesting=-1), ] diff --git a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_redirects.py b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_redirects.py index 56077b90014be..dda414634fd04 100644 --- a/pkgs/by-name/ni/nixos-render-docs/src/tests/test_redirects.py +++ b/pkgs/by-name/ni/nixos-render-docs/src/tests/test_redirects.py @@ -8,22 +8,34 @@ class TestRedirects(unittest.TestCase): def setup_test(self, sources, raw_redirects): - with 
open(Path(__file__).parent / 'index.md', 'w') as infile: - indexHTML = ["# Redirects test suite {#redirects-test-suite}\n## Setup steps"] + with open(Path(__file__).parent / "index.md", "w") as infile: + indexHTML = [ + "# Redirects test suite {#redirects-test-suite}\n## Setup steps" + ] for path in sources.keys(): outpath = f"{path.split('.md')[0]}.html" - indexHTML.append(f"```{{=include=}} appendix html:into-file=//{outpath}\n{path}\n```") + indexHTML.append( + f"```{{=include=}} appendix html:into-file=//{outpath}\n{path}\n```" + ) infile.write("\n".join(indexHTML)) for filename, content in sources.items(): - with open(Path(__file__).parent / filename, 'w') as infile: + with open(Path(__file__).parent / filename, "w") as infile: infile.write(content) - redirects = Redirects({"redirects-test-suite": ["index.html#redirects-test-suite"]} | raw_redirects, '') - return HTMLConverter("1.0.0", HTMLParameters("", [], [], 2, 2, 2, Path("")), {}, redirects) + redirects = Redirects( + {"redirects-test-suite": ["index.html#redirects-test-suite"]} + | raw_redirects, + "", + ) + return HTMLConverter( + "1.0.0", HTMLParameters("", [], [], 2, 2, 2, Path("")), {}, redirects + ) def run_test(self, md: HTMLConverter): - md.convert(Path(__file__).parent / 'index.md', Path(__file__).parent / 'index.html') + md.convert( + Path(__file__).parent / "index.md", Path(__file__).parent / "index.html" + ) def assert_redirect_error(self, expected_errors: dict, md: HTMLConverter): with self.assertRaises(RuntimeError) as context: @@ -49,7 +61,9 @@ def test_identifier_added(self): sources={"foo.md": "# Foo {#foo}\n## Bar {#bar}"}, raw_redirects={"foo": ["foo.html#foo"]}, ) - self.assert_redirect_error({"identifiers_without_redirects": ["bar"]}, intermediate) + self.assert_redirect_error( + {"identifiers_without_redirects": ["bar"]}, intermediate + ) after = self.setup_test( sources={"foo.md": "# Foo {#foo}\n## Bar {#bar}"}, @@ -92,14 +106,17 @@ def test_identifier_renamed(self): self.assert_redirect_error( { "identifiers_without_redirects": ["foo-prime"], - "orphan_identifiers": ["foo"] + "orphan_identifiers": ["foo"], }, - intermediate + intermediate, ) after = self.setup_test( sources={"foo.md": "# Foo Prime {#foo-prime}\n## Bar {#bar}"}, - raw_redirects={"foo-prime": ["foo.html#foo-prime", "foo.html#foo"], "bar": ["foo.html#bar"]}, + raw_redirects={ + "foo-prime": ["foo.html#foo-prime", "foo.html#foo"], + "bar": ["foo.html#bar"], + }, ) self.run_test(after) @@ -112,20 +129,19 @@ def test_leaf_identifier_moved_to_different_file(self): self.run_test(before) intermediate = self.setup_test( - sources={ - "foo.md": "# Foo {#foo}", - "bar.md": "# Bar {#bar}" - }, + sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}"}, raw_redirects={"foo": ["foo.html#foo"], "bar": ["foo.html#foo"]}, ) - self.assert_redirect_error({"identifiers_missing_current_outpath": ["bar"]}, intermediate) + self.assert_redirect_error( + {"identifiers_missing_current_outpath": ["bar"]}, intermediate + ) after = self.setup_test( - sources={ - "foo.md": "# Foo {#foo}", - "bar.md": "# Bar {#bar}" + sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}"}, + raw_redirects={ + "foo": ["foo.html#foo"], + "bar": ["bar.html#bar", "foo.html#bar"], }, - raw_redirects={"foo": ["foo.html#foo"], "bar": ["bar.html#bar", "foo.html#bar"]}, ) self.run_test(after) @@ -133,28 +149,32 @@ def test_non_leaf_identifier_moved_to_different_file(self): """Test moving a non-leaf identifier to a different output path.""" before = self.setup_test( sources={"foo.md": "# 
Foo {#foo}\n## Bar {#bar}\n### Baz {#baz}"}, - raw_redirects={"foo": ["foo.html#foo"], "bar": ["foo.html#bar"], "baz": ["foo.html#baz"]}, + raw_redirects={ + "foo": ["foo.html#foo"], + "bar": ["foo.html#bar"], + "baz": ["foo.html#baz"], + }, ) self.run_test(before) intermediate = self.setup_test( - sources={ - "foo.md": "# Foo {#foo}", - "bar.md": "# Bar {#bar}\n## Baz {#baz}" + sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}\n## Baz {#baz}"}, + raw_redirects={ + "foo": ["foo.html#foo"], + "bar": ["foo.html#bar"], + "baz": ["foo.html#baz"], }, - raw_redirects={"foo": ["foo.html#foo"], "bar": ["foo.html#bar"], "baz": ["foo.html#baz"]}, ) - self.assert_redirect_error({"identifiers_missing_current_outpath": ["bar", "baz"]}, intermediate) + self.assert_redirect_error( + {"identifiers_missing_current_outpath": ["bar", "baz"]}, intermediate + ) after = self.setup_test( - sources={ - "foo.md": "# Foo {#foo}", - "bar.md": "# Bar {#bar}\n## Baz {#baz}" - }, + sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}\n## Baz {#baz}"}, raw_redirects={ "foo": ["foo.html#foo"], "bar": ["bar.html#bar", "foo.html#bar"], - "baz": ["bar.html#baz", "foo.html#baz"] + "baz": ["bar.html#baz", "foo.html#baz"], }, ) self.run_test(after) @@ -166,21 +186,18 @@ def test_conflicting_anchors(self): raw_redirects={ "foo": ["foo.html#foo", "foo.html#bar"], "bar": ["foo.html#bar"], - } + }, ) self.assert_redirect_error({"conflicting_anchors": ["bar"]}, md) def test_divergent_redirect(self): """Test for divergent redirects.""" md = self.setup_test( - sources={ - "foo.md": "# Foo {#foo}", - "bar.md": "# Bar {#bar}" - }, + sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}"}, raw_redirects={ "foo": ["foo.html#foo", "old-foo.html"], - "bar": ["bar.html#bar", "old-foo.html"] - } + "bar": ["bar.html#bar", "old-foo.html"], + }, ) self.assert_redirect_error({"divergent_redirects": ["old-foo.html"]}, md) @@ -188,7 +205,10 @@ def test_no_client_redirects(self): """Test fetching client side redirects and ignore server-side ones.""" md = self.setup_test( sources={"foo.md": "# Foo {#foo}\n## Bar {#bar}"}, - raw_redirects={"foo": ["foo.html#foo"], "bar": ["foo.html#bar", "bar.html"]} + raw_redirects={ + "foo": ["foo.html#foo"], + "bar": ["foo.html#bar", "bar.html"], + }, ) self.run_test(md) self.assertEqual(md._redirects.get_client_redirects("foo.html"), {}) @@ -198,14 +218,21 @@ def test_basic_redirect_matching(self): md = self.setup_test( sources={"foo.md": "# Foo {#foo}\n## Bar {#bar}"}, raw_redirects={ - 'foo': ['foo.html#foo', 'foo.html#some-section', 'foo.html#another-section'], - 'bar': ['foo.html#bar'], + "foo": [ + "foo.html#foo", + "foo.html#some-section", + "foo.html#another-section", + ], + "bar": ["foo.html#bar"], }, ) self.run_test(md) client_redirects = md._redirects.get_client_redirects("foo.html") - expected_redirects = {'some-section': 'foo.html#foo', 'another-section': 'foo.html#foo'} + expected_redirects = { + "some-section": "foo.html#foo", + "another-section": "foo.html#foo", + } self.assertEqual(client_redirects, expected_redirects) def test_advanced_redirect_matching(self): @@ -213,19 +240,19 @@ def test_advanced_redirect_matching(self): md = self.setup_test( sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}"}, raw_redirects={ - 'foo': ['foo.html#foo', 'foo.html#some-section', 'bar.html#foo'], - 'bar': ['bar.html#bar', 'bar.html#another-section'], + "foo": ["foo.html#foo", "foo.html#some-section", "bar.html#foo"], + "bar": ["bar.html#bar", "bar.html#another-section"], }, ) 
self.run_test(md) self.assertEqual(md._redirects.get_client_redirects("index.html"), {}) client_redirects = md._redirects.get_client_redirects("foo.html") - expected_redirects = {'some-section': 'foo.html#foo'} + expected_redirects = {"some-section": "foo.html#foo"} self.assertEqual(client_redirects, expected_redirects) client_redirects = md._redirects.get_client_redirects("bar.html") - expected_redirects = {'foo': 'foo.html#foo', 'another-section': 'bar.html#bar'} + expected_redirects = {"foo": "foo.html#foo", "another-section": "bar.html#bar"} self.assertEqual(client_redirects, expected_redirects) def test_server_redirects(self): @@ -233,14 +260,17 @@ def test_server_redirects(self): md = self.setup_test( sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}"}, raw_redirects={ - 'foo': ['foo.html#foo', 'foo-prime.html'], - 'bar': ['bar.html#bar', 'bar-prime.html'], + "foo": ["foo.html#foo", "foo-prime.html"], + "bar": ["bar.html#bar", "bar-prime.html"], }, ) self.run_test(md) server_redirects = md._redirects.get_server_redirects() - expected_redirects = {'foo-prime.html': 'foo.html', 'bar-prime.html': 'bar.html'} + expected_redirects = { + "foo-prime.html": "foo.html", + "bar-prime.html": "bar.html", + } self.assertEqual(server_redirects, expected_redirects) def test_client_redirects_to_ghost_paths(self): @@ -248,12 +278,12 @@ def test_client_redirects_to_ghost_paths(self): md = self.setup_test( sources={"foo.md": "# Foo {#foo}", "bar.md": "# Bar {#bar}"}, raw_redirects={ - 'foo': ['foo.html#foo', 'foo-prime.html'], - 'bar': ['bar.html#bar', 'foo-prime.html#old'], + "foo": ["foo.html#foo", "foo-prime.html"], + "bar": ["bar.html#bar", "foo-prime.html#old"], }, ) self.run_test(md) client_redirects = md._redirects.get_client_redirects("foo.html") - expected_redirects = {'old': 'bar.html#bar'} + expected_redirects = {"old": "bar.html#bar"} self.assertEqual(client_redirects, expected_redirects) diff --git a/pkgs/by-name/oc/ocis_5-bin/update.py b/pkgs/by-name/oc/ocis_5-bin/update.py index c2f717cb41d59..bbf5396ef4c02 100755 --- a/pkgs/by-name/oc/ocis_5-bin/update.py +++ b/pkgs/by-name/oc/ocis_5-bin/update.py @@ -9,6 +9,7 @@ standard library modules, we avoid dependencies on third-party libraries, which simplifies deployment and improves portability. 
""" + import urllib.request import os import subprocess @@ -24,6 +25,7 @@ MAJOR_VERSION = 5 PKG_NAME = f"ocis_{MAJOR_VERSION}-5" + class TableParser(HTMLParser): def __init__(self, version): super().__init__() diff --git a/pkgs/by-name/os/osquery/update.py b/pkgs/by-name/os/osquery/update.py index 3bb24c68a528a..0176ca2693fb5 100644 --- a/pkgs/by-name/os/osquery/update.py +++ b/pkgs/by-name/os/osquery/update.py @@ -5,35 +5,35 @@ import sys import urllib.request -OWNER = 'osquery' -REPO = 'osquery' +OWNER = "osquery" +REPO = "osquery" OPENSSL_VERSION_PAT = re.compile(r'^set\(OPENSSL_VERSION "(.*)"\)') OPENSSL_SHA256_PAT = re.compile(r'^set\(OPENSSL_ARCHIVE_SHA256 "(.*)"\)') -INFO_PATH = 'pkgs/by-name/os/osquery/info.json' +INFO_PATH = "pkgs/by-name/os/osquery/info.json" def download_str(url): - return urllib.request.urlopen(url).read().decode('utf-8') + return urllib.request.urlopen(url).read().decode("utf-8") def get_latest_tag(): - latest_url = f'https://api.github.com/repos/{OWNER}/{REPO}/releases/latest' - return json.loads(download_str(latest_url))['tag_name'] + latest_url = f"https://api.github.com/repos/{OWNER}/{REPO}/releases/latest" + return json.loads(download_str(latest_url))["tag_name"] def read_info(): - with open(INFO_PATH, 'r') as f: + with open(INFO_PATH, "r") as f: return json.load(f) def write_info(info): - with open(INFO_PATH, 'w') as f: + with open(INFO_PATH, "w") as f: json.dump(info, f, indent=4, sort_keys=True) - f.write('\n') + f.write("\n") def sha256_hex_to_sri(hex): - return 'sha256-' + base64.b64encode(bytes.fromhex(hex)).decode() + return "sha256-" + base64.b64encode(bytes.fromhex(hex)).decode() def openssl_info_from_cmake(cmake): @@ -52,58 +52,59 @@ def openssl_info_from_cmake(cmake): break if version is None or sha256 is None: - raise Exception('Failed to extract openssl fetch info') + raise Exception("Failed to extract openssl fetch info") return { - 'url': f'https://www.openssl.org/source/openssl-{version}.tar.gz', - 'hash': sha256_hex_to_sri(sha256) + "url": f"https://www.openssl.org/source/openssl-{version}.tar.gz", + "hash": sha256_hex_to_sri(sha256), } def openssl_info_for_rev(rev): - url = f'https://raw.githubusercontent.com/{OWNER}/{REPO}/{rev}/libraries/cmake/formula/openssl/CMakeLists.txt' # noqa: E501 + url = f"https://raw.githubusercontent.com/{OWNER}/{REPO}/{rev}/libraries/cmake/formula/openssl/CMakeLists.txt" # noqa: E501 return openssl_info_from_cmake(download_str(url)) -force = len(sys.argv) == 2 and sys.argv[1] == '--force' +force = len(sys.argv) == 2 and sys.argv[1] == "--force" latest_tag = get_latest_tag() -print(f'osquery_latest_tag: {latest_tag}') +print(f"osquery_latest_tag: {latest_tag}") if not force: old_info = read_info() - if latest_tag == old_info['osquery']['rev']: - print('latest tag matches existing rev. exiting') + if latest_tag == old_info["osquery"]["rev"]: + print("latest tag matches existing rev. 
exiting") sys.exit(0) openssl_fetch_info = openssl_info_for_rev(latest_tag) -print(f'openssl_info: {openssl_fetch_info}') - -prefetch = json.loads(subprocess.check_output([ - 'nix-prefetch-git', - '--fetch-submodules', - '--quiet', - f'https://github.com/{OWNER}/{REPO}', - latest_tag -])) - -prefetch_hash = prefetch['hash'] +print(f"openssl_info: {openssl_fetch_info}") + +prefetch = json.loads( + subprocess.check_output( + [ + "nix-prefetch-git", + "--fetch-submodules", + "--quiet", + f"https://github.com/{OWNER}/{REPO}", + latest_tag, + ] + ) +) + +prefetch_hash = prefetch["hash"] github_fetch_info = { - 'owner': OWNER, - 'repo': REPO, - 'rev': latest_tag, - 'hash': prefetch_hash, - 'fetchSubmodules': True + "owner": OWNER, + "repo": REPO, + "rev": latest_tag, + "hash": prefetch_hash, + "fetchSubmodules": True, } -print(f'osquery_hash: {prefetch_hash}') +print(f"osquery_hash: {prefetch_hash}") -new_info = { - 'osquery': github_fetch_info, - 'openssl': openssl_fetch_info -} +new_info = {"osquery": github_fetch_info, "openssl": openssl_fetch_info} -print(f'osquery_info: {new_info}') +print(f"osquery_info: {new_info}") write_info(new_info) diff --git a/pkgs/by-name/pi/picoscope/update.py b/pkgs/by-name/pi/picoscope/update.py index ecbd2292030d4..632ef93fcaab3 100755 --- a/pkgs/by-name/pi/picoscope/update.py +++ b/pkgs/by-name/pi/picoscope/update.py @@ -5,13 +5,15 @@ import requests import sys + def parse_packages(text): res = [] for package in resp.text.split("\n\n"): - if not package: continue + if not package: + continue pkg = {} for field in package.split("\n"): - if field.startswith(" "): # multiline string + if field.startswith(" "): # multiline string pkg[k] += "\n" + field[1:] else: [k, v] = field.split(": ", 1) @@ -19,26 +21,36 @@ def parse_packages(text): res.append(pkg) return res + def generate_sources(packages): sources_spec = {} for pkg in pkgs: - sources_spec[pkg['Package']] = { + sources_spec[pkg["Package"]] = { "url": "https://labs.picotech.com/rc/picoscope7/debian/" + pkg["Filename"], "sha256": pkg["SHA256"], - "version": pkg["Version"] + "version": pkg["Version"], } return sources_spec + out = {} for nix_system, release in {"x86_64-linux": "amd64"}.items(): - resp = requests.get("https://labs.picotech.com/rc/picoscope7/debian//dists/picoscope/main/binary-"+release+"/Packages") + resp = requests.get( + "https://labs.picotech.com/rc/picoscope7/debian//dists/picoscope/main/binary-" + + release + + "/Packages" + ) if resp.status_code != 200: - print("error: could not fetch data for release {} (code {})".format(release, resp.code), file=sys.stderr) + print( + "error: could not fetch data for release {} (code {})".format( + release, resp.code + ), + file=sys.stderr, + ) sys.exit(1) pkgs = parse_packages(resp.text) out[nix_system] = generate_sources(pkgs) with open(os.path.dirname(__file__) + "/sources.json", "w") as f: json.dump(out, f, indent=2, sort_keys=True) - f.write('\n') - + f.write("\n") diff --git a/pkgs/by-name/pr/prowlarr/update.py b/pkgs/by-name/pr/prowlarr/update.py index 4bb1336881388..7e152fbe50b2c 100644 --- a/pkgs/by-name/pr/prowlarr/update.py +++ b/pkgs/by-name/pr/prowlarr/update.py @@ -29,10 +29,12 @@ def nix_hash_to_sri(hash): return subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "hash", "to-sri", - "--type", "sha256", + "--type", + "sha256", "--", hash, ], @@ -45,26 +47,31 @@ def nix_hash_to_sri(hash): nixpkgs_path = "." 
attr_path = os.getenv("UPDATE_NIX_ATTR_PATH", "prowlarr") -package_attrs = json.loads(subprocess.run( - [ - "nix", - "--extra-experimental-features", "nix-command", - "eval", - "--json", - "--file", nixpkgs_path, - "--apply", """p: { +package_attrs = json.loads( + subprocess.run( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "eval", + "--json", + "--file", + nixpkgs_path, + "--apply", + """p: { dir = dirOf p.meta.position; version = p.version; sourceHash = p.src.src.outputHash; yarnHash = p.yarnOfflineCache.outputHash; }""", - "--", - attr_path, - ], - stdout=subprocess.PIPE, - text=True, - check=True, -).stdout) + "--", + attr_path, + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ).stdout +) old_version = package_attrs["version"] new_version = old_version @@ -82,18 +89,23 @@ def nix_hash_to_sri(hash): if new_version == old_version: sys.exit() -source_nix_hash, source_store_path = subprocess.run( - [ - "nix-prefetch-url", - "--name", "source", - "--unpack", - "--print-path", - f"https://github.com/Prowlarr/Prowlarr/archive/v{new_version}.tar.gz", - ], - stdout=subprocess.PIPE, - text=True, - check=True, -).stdout.rstrip().split("\n") +source_nix_hash, source_store_path = ( + subprocess.run( + [ + "nix-prefetch-url", + "--name", + "source", + "--unpack", + "--print-path", + f"https://github.com/Prowlarr/Prowlarr/archive/v{new_version}.tar.gz", + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ) + .stdout.rstrip() + .split("\n") +) old_source_hash = package_attrs["sourceHash"] new_source_hash = nix_hash_to_sri(source_nix_hash) @@ -110,12 +122,15 @@ def nix_hash_to_sri(hash): shutil.copytree(package_dir, work_dir, dirs_exist_ok=True) - replace_in_file(package_file, { - # NB unlike hashes, versions are likely to be used in code or comments. - # Try to be more specific to avoid false positive matches. - f"version = \"{old_version}\"": f"version = \"{new_version}\"", - old_source_hash: new_source_hash, - }) + replace_in_file( + package_file, + { + # NB unlike hashes, versions are likely to be used in code or comments. + # Try to be more specific to avoid false positive matches. + f'version = "{old_version}"': f'version = "{new_version}"', + old_source_hash: new_source_hash, + }, + ) # We need access to the patched and updated src to get the patched # `yarn.lock`. @@ -123,48 +138,65 @@ def nix_hash_to_sri(hash): subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "build", "--impure", - "--nix-path", "", - "--include", f"nixpkgs={nixpkgs_path}", - "--include", f"package={package_file}", - "--expr", "(import { }).callPackage { }", - "--out-link", patched_src, + "--nix-path", + "", + "--include", + f"nixpkgs={nixpkgs_path}", + "--include", + f"package={package_file}", + "--expr", + "(import { }).callPackage { }", + "--out-link", + patched_src, "src", ], check=True, ) old_yarn_hash = package_attrs["yarnHash"] - new_yarn_hash = nix_hash_to_sri(subprocess.run( - [ - "prefetch-yarn-deps", - # does not support "--" separator :( - # Also --verbose writes to stdout, yikes. - os.path.join(patched_src, "yarn.lock"), - ], - stdout=subprocess.PIPE, - text=True, - check=True, - ).stdout.rstrip()) + new_yarn_hash = nix_hash_to_sri( + subprocess.run( + [ + "prefetch-yarn-deps", + # does not support "--" separator :( + # Also --verbose writes to stdout, yikes. 
+ os.path.join(patched_src, "yarn.lock"), + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ).stdout.rstrip() + ) - replace_in_file(package_file, { - old_yarn_hash: new_yarn_hash, - }) + replace_in_file( + package_file, + { + old_yarn_hash: new_yarn_hash, + }, + ) # Generate nuget-to-json dependency lock file. fetch_deps = os.path.join(work_dir, "fetch-deps") subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "build", "--impure", - "--nix-path", "", - "--include", f"nixpkgs={nixpkgs_path}", - "--include", f"package={package_file}", - "--expr", "(import { }).callPackage { }", - "--out-link", fetch_deps, + "--nix-path", + "", + "--include", + f"nixpkgs={nixpkgs_path}", + "--include", + f"package={package_file}", + "--expr", + "(import { }).callPackage { }", + "--out-link", + fetch_deps, "passthru.fetch-deps", ], check=True, diff --git a/pkgs/by-name/pu/pulumi/plugins/pulumi-python/smoke-test/__main__.py b/pkgs/by-name/pu/pulumi/plugins/pulumi-python/smoke-test/__main__.py index 43b7d1a8e8959..8c21f0eb9639b 100644 --- a/pkgs/by-name/pu/pulumi/plugins/pulumi-python/smoke-test/__main__.py +++ b/pkgs/by-name/pu/pulumi/plugins/pulumi-python/smoke-test/__main__.py @@ -11,8 +11,8 @@ def create(self, inputs): class Random(Resource): - def __init__(self, name, opts = None): - super().__init__(RandomProvider(), name, {}, opts) + def __init__(self, name, opts=None): + super().__init__(RandomProvider(), name, {}, opts) export("out", Random(name="random_test").id) diff --git a/pkgs/by-name/ra/racket/update.py b/pkgs/by-name/ra/racket/update.py index 284c77bce1d60..eec12499ff1eb 100755 --- a/pkgs/by-name/ra/racket/update.py +++ b/pkgs/by-name/ra/racket/update.py @@ -9,18 +9,15 @@ SITE = "https://download.racket-lang.org" MANIFEST_FILENAME = "manifest.json" + def find_info(table, group_name, subgroup_name): - group = table.find( - string=re.compile("^{}\\s*".format(group_name)) - ).find_parent("tr", class_="group") + group = table.find(string=re.compile("^{}\\s*".format(group_name))).find_parent( + "tr", class_="group" + ) subgroup = group.find_next( string=re.compile("^{}\\s*".format(subgroup_name)) - ).find_parent(class_="subgroup") - link = subgroup.find_next( - "a", - class_="installer", - string="Source" - ) + ).find_parent(class_="subgroup") + link = subgroup.find_next("a", class_="installer", string="Source") filename = link["href"].split("/")[1] sha256 = link.find_next(class_="checksum").string @@ -29,23 +26,20 @@ def find_info(table, group_name, subgroup_name): "sha256": sha256, } + os.chdir(os.path.dirname(os.path.abspath(__file__))) prev_version = os.environ["UPDATE_NIX_OLD_VERSION"] homepage = BeautifulSoup(requests.get(SITE).text, "html.parser") -version = homepage.find( - "h3", - string=re.compile("^Version \\d+\\.\\d+") -).string.split()[1] +version = homepage.find("h3", string=re.compile("^Version \\d+\\.\\d+")).string.split()[ + 1 +] if version == prev_version: raise Exception("no newer version available") -down_page_path = homepage.find( - "a", - string="More Installers and Checksums" -)["href"] +down_page_path = homepage.find("a", string="More Installers and Checksums")["href"] down_page = BeautifulSoup(requests.get(SITE + "/" + down_page_path).text, "html.parser") down_table = down_page.find(class_="download-table") @@ -53,21 +47,29 @@ def find_info(table, group_name, subgroup_name): minimal = find_info(down_table, "Minimal Racket", "All Platforms") with open(MANIFEST_FILENAME, "w", encoding="utf-8") 
as f: - json.dump({ - "version": version, - "full": full, - "minimal": minimal, - }, f, indent=2, ensure_ascii=False) - f.write("\n") - -print(json.dumps( - [ + json.dump( { - "attrPath": os.environ["UPDATE_NIX_ATTR_PATH"], - "oldVersion": prev_version, - "newVersion": version, - "files": [ os.path.abspath(MANIFEST_FILENAME) ], + "version": version, + "full": full, + "minimal": minimal, }, - ], - indent=2, ensure_ascii=False -)) + f, + indent=2, + ensure_ascii=False, + ) + f.write("\n") + +print( + json.dumps( + [ + { + "attrPath": os.environ["UPDATE_NIX_ATTR_PATH"], + "oldVersion": prev_version, + "newVersion": version, + "files": [os.path.abspath(MANIFEST_FILENAME)], + }, + ], + indent=2, + ensure_ascii=False, + ) +) diff --git a/pkgs/by-name/ra/radarr/update.py b/pkgs/by-name/ra/radarr/update.py index 614abee64279c..3a7298369c20a 100644 --- a/pkgs/by-name/ra/radarr/update.py +++ b/pkgs/by-name/ra/radarr/update.py @@ -29,10 +29,12 @@ def nix_hash_to_sri(hash): return subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "hash", "to-sri", - "--type", "sha256", + "--type", + "sha256", "--", hash, ], @@ -45,26 +47,31 @@ def nix_hash_to_sri(hash): nixpkgs_path = "." attr_path = os.getenv("UPDATE_NIX_ATTR_PATH", "radarr") -package_attrs = json.loads(subprocess.run( - [ - "nix", - "--extra-experimental-features", "nix-command", - "eval", - "--json", - "--file", nixpkgs_path, - "--apply", """p: { +package_attrs = json.loads( + subprocess.run( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "eval", + "--json", + "--file", + nixpkgs_path, + "--apply", + """p: { dir = dirOf p.meta.position; version = p.version; sourceHash = p.src.src.outputHash; yarnHash = p.yarnOfflineCache.outputHash; }""", - "--", - attr_path, - ], - stdout=subprocess.PIPE, - text=True, - check=True, -).stdout) + "--", + attr_path, + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ).stdout +) old_version = package_attrs["version"] new_version = old_version @@ -82,18 +89,23 @@ def nix_hash_to_sri(hash): if new_version == old_version: sys.exit() -source_nix_hash, source_store_path = subprocess.run( - [ - "nix-prefetch-url", - "--name", "source", - "--unpack", - "--print-path", - f"https://github.com/Radarr/Radarr/archive/v{new_version}.tar.gz", - ], - stdout=subprocess.PIPE, - text=True, - check=True, -).stdout.rstrip().split("\n") +source_nix_hash, source_store_path = ( + subprocess.run( + [ + "nix-prefetch-url", + "--name", + "source", + "--unpack", + "--print-path", + f"https://github.com/Radarr/Radarr/archive/v{new_version}.tar.gz", + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ) + .stdout.rstrip() + .split("\n") +) old_source_hash = package_attrs["sourceHash"] new_source_hash = nix_hash_to_sri(source_nix_hash) @@ -110,12 +122,15 @@ def nix_hash_to_sri(hash): shutil.copytree(package_dir, work_dir, dirs_exist_ok=True) - replace_in_file(package_file, { - # NB unlike hashes, versions are likely to be used in code or comments. - # Try to be more specific to avoid false positive matches. - f"version = \"{old_version}\"": f"version = \"{new_version}\"", - old_source_hash: new_source_hash, - }) + replace_in_file( + package_file, + { + # NB unlike hashes, versions are likely to be used in code or comments. + # Try to be more specific to avoid false positive matches. 
+ f'version = "{old_version}"': f'version = "{new_version}"', + old_source_hash: new_source_hash, + }, + ) # We need access to the patched and updated src to get the patched # `yarn.lock`. @@ -123,48 +138,65 @@ def nix_hash_to_sri(hash): subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "build", "--impure", - "--nix-path", "", - "--include", f"nixpkgs={nixpkgs_path}", - "--include", f"package={package_file}", - "--expr", "(import { }).callPackage { }", - "--out-link", patched_src, + "--nix-path", + "", + "--include", + f"nixpkgs={nixpkgs_path}", + "--include", + f"package={package_file}", + "--expr", + "(import { }).callPackage { }", + "--out-link", + patched_src, "src", ], check=True, ) old_yarn_hash = package_attrs["yarnHash"] - new_yarn_hash = nix_hash_to_sri(subprocess.run( - [ - "prefetch-yarn-deps", - # does not support "--" separator :( - # Also --verbose writes to stdout, yikes. - os.path.join(patched_src, "yarn.lock"), - ], - stdout=subprocess.PIPE, - text=True, - check=True, - ).stdout.rstrip()) + new_yarn_hash = nix_hash_to_sri( + subprocess.run( + [ + "prefetch-yarn-deps", + # does not support "--" separator :( + # Also --verbose writes to stdout, yikes. + os.path.join(patched_src, "yarn.lock"), + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ).stdout.rstrip() + ) - replace_in_file(package_file, { - old_yarn_hash: new_yarn_hash, - }) + replace_in_file( + package_file, + { + old_yarn_hash: new_yarn_hash, + }, + ) # Generate nuget-to-json dependency lock file. fetch_deps = os.path.join(work_dir, "fetch-deps") subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "build", "--impure", - "--nix-path", "", - "--include", f"nixpkgs={nixpkgs_path}", - "--include", f"package={package_file}", - "--expr", "(import { }).callPackage { }", - "--out-link", fetch_deps, + "--nix-path", + "", + "--include", + f"nixpkgs={nixpkgs_path}", + "--include", + f"package={package_file}", + "--expr", + "(import { }).callPackage { }", + "--out-link", + fetch_deps, "passthru.fetch-deps", ], check=True, diff --git a/pkgs/by-name/re/recon-ng/setup.py b/pkgs/by-name/re/recon-ng/setup.py index 44db60ce43a63..c5daa8d959d8f 100644 --- a/pkgs/by-name/re/recon-ng/setup.py +++ b/pkgs/by-name/re/recon-ng/setup.py @@ -1,8 +1,8 @@ from setuptools import setup setup( - name='@pname@', - version='@version@', + name="@pname@", + version="@version@", install_requires=[ "pyyaml", "dnspython", @@ -15,11 +15,7 @@ "dicttoxml", "xlsxwriter", "unicodecsv", - "rq" - ], - scripts=[ - 'recon-ng', - "recon-cli", - "recon-web" + "rq", ], + scripts=["recon-ng", "recon-cli", "recon-web"], ) diff --git a/pkgs/by-name/sa/sapling/gen-deps.py b/pkgs/by-name/sa/sapling/gen-deps.py index ddab0080f6406..1ab9708c84b90 100755 --- a/pkgs/by-name/sa/sapling/gen-deps.py +++ b/pkgs/by-name/sa/sapling/gen-deps.py @@ -14,16 +14,23 @@ from requests import get # Fetch the latest stable release metadata from GitHub -releaseMetadata = get("https://api.github.com/repos/facebook/sapling/releases/latest").json() +releaseMetadata = get( + "https://api.github.com/repos/facebook/sapling/releases/latest" +).json() latestTag = releaseMetadata["tag_name"] latestTarballURL = releaseMetadata["tarball_url"] -[_tarballHash, sourceDirectory] = run( - ["nix-prefetch-url", "--print-path", "--unpack", latestTarballURL], - check=True, - text=True, - stdout=subprocess.PIPE, 
-).stdout.rstrip().splitlines() +[_tarballHash, sourceDirectory] = ( + run( + ["nix-prefetch-url", "--print-path", "--unpack", latestTarballURL], + check=True, + text=True, + stdout=subprocess.PIPE, + ) + .stdout.rstrip() + .splitlines() +) + def updateCargoLock(): with tempfile.TemporaryDirectory() as tempDir: @@ -34,15 +41,20 @@ def updateCargoLock(): for dirpath, dirnames, filenames in os.walk(sourceDirectory): relativeDirpath = os.path.relpath(dirpath, sourceDirectory) for filename in filenames: - shutil.copy(os.path.join(dirpath, filename), tempDir / relativeDirpath / filename) + shutil.copy( + os.path.join(dirpath, filename), + tempDir / relativeDirpath / filename, + ) for dirname in dirnames: os.mkdir(tempDir / relativeDirpath / dirname) run(["cargo", "fetch"], check=True, cwd=tempDir / "eden" / "scm") shutil.copy(tempDir / "eden" / "scm" / "Cargo.lock", "Cargo.lock") + updateCargoLock() + def nixPrefetchUrl(url): return run( ["nix-prefetch-url", "--type", "sha256", url], diff --git a/pkgs/by-name/se/segger-jlink/update.py b/pkgs/by-name/se/segger-jlink/update.py index 35e8586beb67e..79e32708ae1ae 100755 --- a/pkgs/by-name/se/segger-jlink/update.py +++ b/pkgs/by-name/se/segger-jlink/update.py @@ -11,43 +11,46 @@ from textwrap import indent, dedent -Arch = namedtuple('Architecture', ['os', 'name', 'ext']) +Arch = namedtuple("Architecture", ["os", "name", "ext"]) ARCH_MAP = { - 'x86_64-linux': Arch(os='Linux', name='x86_64', ext='tgz'), - 'i686-linux': Arch(os='Linux', name='i386', ext='tgz'), - 'aarch64-linux': Arch(os='Linux', name='arm64', ext='tgz'), - 'armv7l-linux': Arch(os='Linux', name='arm', ext='tgz'), - 'aarch64-darwin': Arch(os='MacOSX', name='arm64', ext='pkg'), - 'x86_64-darwin': Arch(os='MacOSX', name='x86_64', ext='pkg'), + "x86_64-linux": Arch(os="Linux", name="x86_64", ext="tgz"), + "i686-linux": Arch(os="Linux", name="i386", ext="tgz"), + "aarch64-linux": Arch(os="Linux", name="arm64", ext="tgz"), + "armv7l-linux": Arch(os="Linux", name="arm", ext="tgz"), + "aarch64-darwin": Arch(os="MacOSX", name="arm64", ext="pkg"), + "x86_64-darwin": Arch(os="MacOSX", name="x86_64", ext="pkg"), } def find_latest_jlink_version() -> str: try: - response = requests.get('https://www.segger.com/downloads/jlink/') + response = requests.get("https://www.segger.com/downloads/jlink/") response.raise_for_status() except requests.RequestException as e: raise RuntimeError(f"Error fetching J-Link version: {e}") - soup = BeautifulSoup(response.text, 'html.parser') + soup = BeautifulSoup(response.text, "html.parser") - jlink_download_tile = soup.find(lambda tag: tag.name == 'tbody' and "J-Link Software and Documentation pack" in tag.text) - version_select = jlink_download_tile.find('select') - version = next(o.text for o in version_select.find_all('option')) + jlink_download_tile = soup.find( + lambda tag: tag.name == "tbody" + and "J-Link Software and Documentation pack" in tag.text + ) + version_select = jlink_download_tile.find("select") + version = next(o.text for o in version_select.find_all("option")) if version is None: raise RuntimeError("Could not find the J-Link version on the download page.") - return version.removeprefix('V').replace('.', '') + return version.removeprefix("V").replace(".", "") def nar_hash(version: str, arch: Arch) -> str: - ''' + """ Return the nar hash of 'version' for 'source'. 
- ''' + """ url = f"https://www.segger.com/downloads/jlink/JLink_{arch.os}_V{version}_{arch.name}.{arch.ext}" try: - response = requests.post(url, data={'accept_license_agreement': 'accepted'}) + response = requests.post(url, data={"accept_license_agreement": "accepted"}) response.raise_for_status() except requests.RequestException as e: raise RuntimeError(f"Error downloading file from {url}: {e}") @@ -55,11 +58,17 @@ def nar_hash(version: str, arch: Arch) -> str: with NamedTemporaryFile() as tmpfile: tmpfile.write(response.content) tmpfile.flush() - output = subprocess.check_output([ - "nix", - "--extra-experimental-features", "nix-command", - "hash", "file", "--sri", tmpfile.name - ]).decode("utf8") + output = subprocess.check_output( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "hash", + "file", + "--sri", + tmpfile.name, + ] + ).decode("utf8") return output.strip() @@ -68,19 +77,22 @@ def update_source(version: str): content = f'version = "{version}";\n' for arch_nix, arch in ARCH_MAP.items(): nhash = nar_hash(version, arch) - content += dedent(f''' + content += ( + dedent(f''' {arch_nix} = {{ os = "{arch.os}"; name = "{arch.name}"; ext = "{arch.ext}"; hash = "{nhash}"; - }};''').strip() + '\n' + }};''').strip() + + "\n" + ) - content = '{\n' + indent(content, ' ') + '}\n' + content = "{\n" + indent(content, " ") + "}\n" - with open(Path(__file__).parent / 'source.nix', 'w') as file: + with open(Path(__file__).parent / "source.nix", "w") as file: file.write(content) -if __name__ == '__main__': +if __name__ == "__main__": update_source(find_latest_jlink_version()) diff --git a/pkgs/by-name/si/signal-desktop-bin/copy-noto-emoji.py b/pkgs/by-name/si/signal-desktop-bin/copy-noto-emoji.py index c873c0b0db4d4..a3eecd0c99e50 100644 --- a/pkgs/by-name/si/signal-desktop-bin/copy-noto-emoji.py +++ b/pkgs/by-name/si/signal-desktop-bin/copy-noto-emoji.py @@ -50,10 +50,10 @@ def _main() -> None: for signal_emoji_names in jumbomoji_packs.values(): for signal_emoji_name in signal_emoji_names: - try: shutil.copy( - noto_png_path / f"emoji_u{emoji_to_noto_name(signal_emoji_name)}.png", + noto_png_path + / f"emoji_u{emoji_to_noto_name(signal_emoji_name)}.png", out_path / signal_emoji_name, ) except FileNotFoundError: diff --git a/pkgs/by-name/so/sommelier/update.py b/pkgs/by-name/so/sommelier/update.py index 9c514696cbaa5..d8d04ff9b07a4 100755 --- a/pkgs/by-name/so/sommelier/update.py +++ b/pkgs/by-name/so/sommelier/update.py @@ -18,11 +18,13 @@ # branch branches are used for fixes for specific devices. So for # Chromium OS they will always be 0. This is a best guess, and is not # documented. 
-with urlopen('https://chromiumdash.appspot.com/cros/download_serving_builds_csv?deviceCategory=ChromeOS') as resp: +with urlopen( + "https://chromiumdash.appspot.com/cros/download_serving_builds_csv?deviceCategory=ChromeOS" +) as resp: reader = csv.reader(map(bytes.decode, resp)) header = next(reader) - cr_stable_index = header.index('cr_stable') - cros_stable_index = header.index('cros_stable') + cr_stable_index = header.index("cr_stable") + cros_stable_index = header.index("cros_stable") chrome_version = [] platform_version = [] @@ -30,32 +32,36 @@ this_chrome_version_str = line[cr_stable_index] if "no update" in this_chrome_version_str: continue - this_chrome_version = list(map(int, this_chrome_version_str.split('.'))) - this_platform_version = list(map(int, line[cros_stable_index].split('.'))) + this_chrome_version = list(map(int, this_chrome_version_str.split("."))) + this_platform_version = list(map(int, line[cros_stable_index].split("."))) chrome_version = max(chrome_version, this_chrome_version) platform_version = max(platform_version, this_platform_version) chrome_major_version = chrome_version[0] chromeos_tip_build = platform_version[0] -release_branch = f'release-R{chrome_major_version}-{chromeos_tip_build}.B' +release_branch = f"release-R{chrome_major_version}-{chromeos_tip_build}.B" # Determine the git revision. -with urlopen(f'https://chromium.googlesource.com/chromiumos/platform2/+/refs/heads/{release_branch}?format=JSON') as resp: - resp.readline() # Remove )]}' header - rev = json.load(resp)['commit'] +with urlopen( + f"https://chromium.googlesource.com/chromiumos/platform2/+/refs/heads/{release_branch}?format=JSON" +) as resp: + resp.readline() # Remove )]}' header + rev = json.load(resp)["commit"] # Determine the patch version by counting the commits that have been # added to the release branch since it forked off the chromeos branch. -with urlopen(f'https://chromium.googlesource.com/chromiumos/platform2/+log/refs/heads/main..{rev}/vm_tools/sommelier?format=JSON') as resp: - resp.readline() # Remove )]}' header - branch_commits = json.load(resp)['log'] - version = f'{chrome_major_version}.{len(branch_commits)}' +with urlopen( + f"https://chromium.googlesource.com/chromiumos/platform2/+log/refs/heads/main..{rev}/vm_tools/sommelier?format=JSON" +) as resp: + resp.readline() # Remove )]}' header + branch_commits = json.load(resp)["log"] + version = f"{chrome_major_version}.{len(branch_commits)}" # Update the version, git revision, and hash in sommelier's default.nix. -subprocess.run(['update-source-version', 'sommelier', f'--rev={rev}', version]) +subprocess.run(["update-source-version", "sommelier", f"--rev={rev}", version]) # Find the path to sommelier's default.nix, so Cargo.lock can be written # into the same directory. 
-argv = ['nix-instantiate', '--eval', '--json', '-A', 'sommelier.meta.position'] -position = json.loads(subprocess.check_output(argv).decode('utf-8')) -filename = re.match(r'[^:]*', position)[0] +argv = ["nix-instantiate", "--eval", "--json", "-A", "sommelier.meta.position"] +position = json.loads(subprocess.check_output(argv).decode("utf-8")) +filename = re.match(r"[^:]*", position)[0] diff --git a/pkgs/by-name/so/sonarr/update.py b/pkgs/by-name/so/sonarr/update.py index 5c8fd4c837005..02abc6261de0a 100644 --- a/pkgs/by-name/so/sonarr/update.py +++ b/pkgs/by-name/so/sonarr/update.py @@ -29,10 +29,12 @@ def nix_hash_to_sri(hash): return subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "hash", "to-sri", - "--type", "sha256", + "--type", + "sha256", "--", hash, ], @@ -45,26 +47,31 @@ def nix_hash_to_sri(hash): nixpkgs_path = "." attr_path = os.getenv("UPDATE_NIX_ATTR_PATH", "sonarr") -package_attrs = json.loads(subprocess.run( - [ - "nix", - "--extra-experimental-features", "nix-command", - "eval", - "--json", - "--file", nixpkgs_path, - "--apply", """p: { +package_attrs = json.loads( + subprocess.run( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "eval", + "--json", + "--file", + nixpkgs_path, + "--apply", + """p: { dir = dirOf p.meta.position; version = p.version; sourceHash = p.src.src.outputHash; yarnHash = p.yarnOfflineCache.outputHash; }""", - "--", - attr_path, - ], - stdout=subprocess.PIPE, - text=True, - check=True, -).stdout) + "--", + attr_path, + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ).stdout +) old_version = package_attrs["version"] new_version = old_version @@ -81,18 +88,23 @@ def nix_hash_to_sri(hash): if new_version == old_version: sys.exit() -source_nix_hash, source_store_path = subprocess.run( - [ - "nix-prefetch-url", - "--name", "source", - "--unpack", - "--print-path", - f"https://github.com/Sonarr/Sonarr/archive/v{new_version}.tar.gz", - ], - stdout=subprocess.PIPE, - text=True, - check=True, -).stdout.rstrip().split("\n") +source_nix_hash, source_store_path = ( + subprocess.run( + [ + "nix-prefetch-url", + "--name", + "source", + "--unpack", + "--print-path", + f"https://github.com/Sonarr/Sonarr/archive/v{new_version}.tar.gz", + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ) + .stdout.rstrip() + .split("\n") +) old_source_hash = package_attrs["sourceHash"] new_source_hash = nix_hash_to_sri(source_nix_hash) @@ -109,12 +121,15 @@ def nix_hash_to_sri(hash): shutil.copytree(package_dir, work_dir, dirs_exist_ok=True) - replace_in_file(package_file, { - # NB unlike hashes, versions are likely to be used in code or comments. - # Try to be more specific to avoid false positive matches. - f"version = \"{old_version}\"": f"version = \"{new_version}\"", - old_source_hash: new_source_hash, - }) + replace_in_file( + package_file, + { + # NB unlike hashes, versions are likely to be used in code or comments. + # Try to be more specific to avoid false positive matches. + f'version = "{old_version}"': f'version = "{new_version}"', + old_source_hash: new_source_hash, + }, + ) # We need access to the patched and updated src to get the patched # `yarn.lock`. 
@@ -122,48 +137,65 @@ def nix_hash_to_sri(hash): subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "build", "--impure", - "--nix-path", "", - "--include", f"nixpkgs={nixpkgs_path}", - "--include", f"package={package_file}", - "--expr", "(import { }).callPackage { }", - "--out-link", patched_src, + "--nix-path", + "", + "--include", + f"nixpkgs={nixpkgs_path}", + "--include", + f"package={package_file}", + "--expr", + "(import { }).callPackage { }", + "--out-link", + patched_src, "src", ], check=True, ) old_yarn_hash = package_attrs["yarnHash"] - new_yarn_hash = nix_hash_to_sri(subprocess.run( - [ - "prefetch-yarn-deps", - # does not support "--" separator :( - # Also --verbose writes to stdout, yikes. - os.path.join(patched_src, "yarn.lock"), - ], - stdout=subprocess.PIPE, - text=True, - check=True, - ).stdout.rstrip()) + new_yarn_hash = nix_hash_to_sri( + subprocess.run( + [ + "prefetch-yarn-deps", + # does not support "--" separator :( + # Also --verbose writes to stdout, yikes. + os.path.join(patched_src, "yarn.lock"), + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ).stdout.rstrip() + ) - replace_in_file(package_file, { - old_yarn_hash: new_yarn_hash, - }) + replace_in_file( + package_file, + { + old_yarn_hash: new_yarn_hash, + }, + ) # Generate nuget-to-json dependency lock file. fetch_deps = os.path.join(work_dir, "fetch-deps") subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "build", "--impure", - "--nix-path", "", - "--include", f"nixpkgs={nixpkgs_path}", - "--include", f"package={package_file}", - "--expr", "(import { }).callPackage { }", - "--out-link", fetch_deps, + "--nix-path", + "", + "--include", + f"nixpkgs={nixpkgs_path}", + "--include", + f"package={package_file}", + "--expr", + "(import { }).callPackage { }", + "--out-link", + fetch_deps, "passthru.fetch-deps", ], check=True, diff --git a/pkgs/by-name/st/stash/update.py b/pkgs/by-name/st/stash/update.py index 85747eb654b22..c540c8c7b755e 100755 --- a/pkgs/by-name/st/stash/update.py +++ b/pkgs/by-name/st/stash/update.py @@ -18,6 +18,7 @@ def run_external(args: list[str]): return proc.stdout.strip().decode("utf8") + def get_latest_release_tag(): req = Request("https://api.github.com/repos/stashapp/stash/tags?per_page=1") @@ -27,13 +28,23 @@ def get_latest_release_tag(): with urlopen(req) as resp: return json.loads(resp.read())[0] + def prefetch_github(rev: str): print(f"Prefetching stashapp/stash({rev})") - proc = run_external(["nix-prefetch-git", "--no-deepClone", "--rev", rev, f"https://github.com/stashapp/stash"]) + proc = run_external( + [ + "nix-prefetch-git", + "--no-deepClone", + "--rev", + rev, + f"https://github.com/stashapp/stash", + ] + ) return json.loads(proc) + def prefetch_yarn(lock_file: str): print(f"Prefetching yarn deps") @@ -41,9 +52,10 @@ def prefetch_yarn(lock_file: str): return run_external(["nix", "hash", "convert", "--hash-algo", "sha256", hash]) + def prefetch_go_modules(src: str, version: str): print(f"Prefetching go modules") - expr = fr""" + expr = rf""" {{ sha256 }}: (buildGoModule {{ pname = "stash"; src = {src}; @@ -51,32 +63,31 @@ def prefetch_go_modules(src: str, version: str): vendorHash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="; }}).goModules.overrideAttrs (_: {{ modSha256 = sha256; }}) """ - return run_external([ - "nix-prefetch", - "--option", - 
"extra-experimental-features", - "flakes", - expr - ]) + return run_external( + ["nix-prefetch", "--option", "extra-experimental-features", "flakes", expr] + ) def save_version_json(version: dict[str, str]): print("Writing version.json") - with open(Path(__file__).parent / "version.json", 'w') as f: + with open(Path(__file__).parent / "version.json", "w") as f: json.dump(version, f, indent=2) f.write("\n") + if __name__ == "__main__": release = get_latest_release_tag() - src = prefetch_github(release['name']) + src = prefetch_github(release["name"]) yarn_hash = prefetch_yarn(f"{src['path']}/ui/v2.5/yarn.lock") - save_version_json({ - "version": release["name"][1:], - "gitHash": release["commit"]["sha"][:8], - "srcHash": src["hash"], - "yarnHash": yarn_hash, - "vendorHash": prefetch_go_modules(src["path"], release["name"][1:]) - }) + save_version_json( + { + "version": release["name"][1:], + "gitHash": release["commit"]["sha"][:8], + "srcHash": src["hash"], + "yarnHash": yarn_hash, + "vendorHash": prefetch_go_modules(src["path"], release["name"][1:]), + } + ) diff --git a/pkgs/by-name/st/steam-unwrapped/update.py b/pkgs/by-name/st/steam-unwrapped/update.py index e49014fee8999..bda6a8f62aec8 100755 --- a/pkgs/by-name/st/steam-unwrapped/update.py +++ b/pkgs/by-name/st/steam-unwrapped/update.py @@ -7,11 +7,11 @@ import subprocess from bs4 import BeautifulSoup -VERSION_PATTERN = re.compile(r'^steam_(?P(\d+\.)+)tar.gz$') +VERSION_PATTERN = re.compile(r"^steam_(?P(\d+\.)+)tar.gz$") found_versions = [] response = requests.get("https://repo.steampowered.com/steam/archive/stable/") -soup = BeautifulSoup (response.text, "html.parser") +soup = BeautifulSoup(response.text, "html.parser") for a in soup.find_all("a"): href = a["href"] if not href.endswith(".tar.gz"): diff --git a/pkgs/by-name/up/update-python-libraries/update-python-libraries.py b/pkgs/by-name/up/update-python-libraries/update-python-libraries.py index b8e1b81b437a7..d32a0fd6d2277 100755 --- a/pkgs/by-name/up/update-python-libraries/update-python-libraries.py +++ b/pkgs/by-name/up/update-python-libraries/update-python-libraries.py @@ -283,14 +283,21 @@ def get_prefix(string): releases = list(filter(lambda x: not x["prerelease"], all_releases)) if len(releases) == 0: - logging.warning(f"{homepage} does not contain any stable releases, looking for tags instead...") + logging.warning( + f"{homepage} does not contain any stable releases, looking for tags instead..." + ) url = f"https://api.github.com/repos/{owner}/{repo}/tags" all_tags = _fetch_github(url) # Releases are used with a couple of fields that tags possess as well. We will fake these releases. - releases = [{'tag_name': tag['name'], 'tarball_url': tag['tarball_url']} for tag in all_tags] + releases = [ + {"tag_name": tag["name"], "tarball_url": tag["tarball_url"]} + for tag in all_tags + ] if len(releases) == 0: - raise ValueError(f"{homepage} does not contain any stable releases neither tags, stopping now.") + raise ValueError( + f"{homepage} does not contain any stable releases neither tags, stopping now." 
+ ) versions = map(lambda x: strip_prefix(x["tag_name"]), releases) version = _determine_latest_version(current_version, target, versions) @@ -512,7 +519,7 @@ def _update_package(path, target): text = text.replace('"${version}";', "version;") # update changelog to reference the src.tag - if result := re.search("changelog = \"[^\"]+\";", text): + if result := re.search('changelog = "[^"]+";', text): cl_old = result[0] cl_new = re.sub(r"v?\$\{(version|src.rev)\}", "${src.tag}", cl_old) text = text.replace(cl_old, cl_new) diff --git a/pkgs/by-name/vs/vscode-extension-update/vscode_extension_update.py b/pkgs/by-name/vs/vscode-extension-update/vscode_extension_update.py index 9859c4d3d4302..1366b3baa6a12 100755 --- a/pkgs/by-name/vs/vscode-extension-update/vscode_extension_update.py +++ b/pkgs/by-name/vs/vscode-extension-update/vscode_extension_update.py @@ -89,16 +89,18 @@ def _get_nix_attribute(self, attribute_path: str) -> str: """ Retrieves a raw Nix attribute value. """ - return self.execute_command([ - "nix", - "--extra-experimental-features", - "nix-command", - "eval", - "--raw", - "-f", - ".", - attribute_path - ]) + return self.execute_command( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "eval", + "--raw", + "-f", + ".", + attribute_path, + ] + ) def get_nix_system(self) -> str: """ @@ -107,13 +109,9 @@ def get_nix_system(self) -> str: return self._get_nix_attribute("stdenv.hostPlatform.system") def get_supported_nix_systems(self) -> list[str]: - nix_config = self.execute_command([ - "nix", - "--extra-experimental-features", - "nix-command", - "config", - "show" - ]) + nix_config = self.execute_command( + ["nix", "--extra-experimental-features", "nix-command", "config", "show"] + ) system = None extra_platforms = [] for line in nix_config.splitlines(): @@ -133,31 +131,35 @@ def _has_platform_source(self) -> bool: return "targetPlatform=" in source_url def _get_nix_vscode_extension_src_hash(self, system: str) -> str: - url = self.execute_command([ - "nix", - "--extra-experimental-features", - "nix-command", - "eval", - "--raw", - "-f", - ".", - f"{self.attribute_path}.src.url", - "--system", - system, - ]) + url = self.execute_command( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "eval", + "--raw", + "-f", + ".", + f"{self.attribute_path}.src.url", + "--system", + system, + ] + ) sha256 = self.execute_command(["nix-prefetch-url", url]) - return self.execute_command([ - "nix", - "--extra-experimental-features", - "nix-command", - "hash", - "convert", - "--to", - "sri", - "--hash-algo", - "sha256", - sha256, - ]) + return self.execute_command( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "hash", + "convert", + "--to", + "sri", + "--hash-algo", + "sha256", + sha256, + ] + ) def get_target_platform(self, nix_system: str) -> str: """ @@ -200,16 +202,18 @@ def _get_nix_vscode_extension_platforms(self) -> list[str]: """ try: return json.loads( - self.execute_command([ - "nix", - "--extra-experimental-features", - "nix-command", - "eval", - "--json", - "-f", - ".", - f"{self.attribute_path}.meta.platforms", - ]) + self.execute_command( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "eval", + "--json", + "-f", + ".", + f"{self.attribute_path}.meta.platforms", + ] + ) ) except subprocess.CalledProcessError: return [] @@ -276,12 +280,14 @@ def find_compatible_extension_version( engine_version_constraint ) try: - self.execute_command([ - "semver", - self.target_vscode_version, - "-r", - engine_version_constraint, 
- ]) + self.execute_command( + [ + "semver", + self.target_vscode_version, + "-r", + engine_version_constraint, + ] + ) logger.info(f"Compatible version found: {candidate_version}") return candidate_version except (ValueError, subprocess.CalledProcessError): @@ -413,12 +419,14 @@ def run(self): self.get_target_platform(self.nix_vscode_extension_platforms[0]), ) try: - self.execute_command([ - "semver", - self.current_version, - "-r", - f"<{self.new_version}", - ]) + self.execute_command( + [ + "semver", + self.current_version, + "-r", + f"<{self.new_version}", + ] + ) except subprocess.CalledProcessError: logger.info("Already up to date or new version is older!") sys.exit(0) @@ -427,12 +435,14 @@ def run(self): self.run_nix_update(version, system) if self.commit: self.execute_command(["git", "add", self.override_filename]) - self.execute_command([ - "git", - "commit", - "-m", - f"{self.attribute_path}: {self.current_version} -> {self.new_version}", - ]) + self.execute_command( + [ + "git", + "commit", + "-m", + f"{self.attribute_path}: {self.current_version} -> {self.new_version}", + ] + ) if __name__ == "__main__": diff --git a/pkgs/by-name/vu/vulkan-cts/vk-cts-sources.py b/pkgs/by-name/vu/vulkan-cts/vk-cts-sources.py index c987b7b6467d5..385be27d494b2 100755 --- a/pkgs/by-name/vu/vulkan-cts/vk-cts-sources.py +++ b/pkgs/by-name/vu/vulkan-cts/vk-cts-sources.py @@ -1,6 +1,6 @@ #!/usr/bin/env nix-shell #!nix-shell -i python3 -p nix-prefetch-github -p git -#nix-shell -I nixpkgs=../../../../ -i python3 -p "python3.withPackages (ps: with ps; [ nix-prefetch-github ])" -p "git" +# nix-shell -I nixpkgs=../../../../ -i python3 -p "python3.withPackages (ps: with ps; [ nix-prefetch-github ])" -p "git" import json import re @@ -9,6 +9,7 @@ import fetch_sources + def get_github_hash(owner, repo, revision): result = subprocess.run( ["nix-prefetch-github", owner, repo, "--json", "--rev", revision], @@ -20,20 +21,22 @@ def get_github_hash(owner, repo, revision): # Remove False values return {k: v for k, v in j.items() if v} + def main(): pkgs = fetch_sources.PACKAGES - pkgs.sort(key = lambda pkg: pkg.baseDir) + pkgs.sort(key=lambda pkg: pkg.baseDir) existing_sources = {} # Fetch hashes from existing sources file with open("sources.nix") as f: existing_file = f.read() - source_re = re.compile("(?P[^ ]+) = fetchFromGitHub[^\n]*\n" - "[^\n]+\n" # owner - "[^\n]+\n" # repo - " *rev = \"(?P[^\"]+)\";\n" - " *hash = \"(?P[^\"]+)\";\n" + source_re = re.compile( + "(?P[^ ]+) = fetchFromGitHub[^\n]*\n" + "[^\n]+\n" # owner + "[^\n]+\n" # repo + ' *rev = "(?P[^"]+)";\n' + ' *hash = "(?P[^"]+)";\n' ) for m in source_re.finditer(existing_file): @@ -42,14 +45,14 @@ def main(): existing_sources[m.group("name")] = (m.group("rev"), m.group("hash")) print() - # Write new sources file with open("sources.nix", "w") as f: f.write("# Autogenerated from vk-cts-sources.py\n") f.write("{ fetchurl, fetchFromGitHub }:\n") - f.write("rec {"); - - github_re = re.compile("https://github.com/(?P[^/]+)/(?P[^/]+).git") + f.write("rec {") + github_re = re.compile( + "https://github.com/(?P[^/]+)/(?P[^/]+).git" + ) for pkg in pkgs: if isinstance(pkg, fetch_sources.GitRepo): @@ -64,30 +67,33 @@ def main(): if hash is None: print(f"Fetching {pkg.baseDir}: {pkg.revision}") - hash = get_github_hash(ms.group("owner"), ms.group("repo"), pkg.revision)["hash"] + hash = get_github_hash( + ms.group("owner"), ms.group("repo"), pkg.revision + )["hash"] print(f"Got {pkg.baseDir}: {pkg.revision} -> {hash}") - f.write(f"\n 
{pkg.baseDir} = fetchFromGitHub {{\n"); - f.write(f" owner = \"{ms.group('owner')}\";\n"); - f.write(f" repo = \"{ms.group('repo')}\";\n"); - f.write(f" rev = \"{pkg.revision}\";\n"); - f.write(f" hash = \"{hash}\";\n"); - f.write(f" }};\n"); + f.write(f"\n {pkg.baseDir} = fetchFromGitHub {{\n") + f.write(f' owner = "{ms.group("owner")}";\n') + f.write(f' repo = "{ms.group("repo")}";\n') + f.write(f' rev = "{pkg.revision}";\n') + f.write(f' hash = "{hash}";\n') + f.write(f" }};\n") - f.write("\n prePatch = ''\n"); - f.write(" mkdir -p"); + f.write("\n prePatch = ''\n") + f.write(" mkdir -p") for pkg in pkgs: if isinstance(pkg, fetch_sources.GitRepo): f.write(f" external/{pkg.baseDir}") - f.write("\n\n"); - + f.write("\n\n") for pkg in pkgs: if isinstance(pkg, fetch_sources.GitRepo): - f.write(f" cp -r ${{{pkg.baseDir}}} external/{pkg.baseDir}/{pkg.extractDir}\n"); + f.write( + f" cp -r ${{{pkg.baseDir}}} external/{pkg.baseDir}/{pkg.extractDir}\n" + ) - f.write(" '';\n"); + f.write(" '';\n") + f.write("}\n") - f.write("}\n"); if __name__ == "__main__": main() diff --git a/pkgs/by-name/wi/widevine-cdm/update-x86_64.py b/pkgs/by-name/wi/widevine-cdm/update-x86_64.py index c255557f62a8b..a77a312955a69 100755 --- a/pkgs/by-name/wi/widevine-cdm/update-x86_64.py +++ b/pkgs/by-name/wi/widevine-cdm/update-x86_64.py @@ -9,17 +9,20 @@ DEFAULT_JSON = "https://raw.githubusercontent.com/mozilla-firefox/firefox/refs/heads/main/toolkit/content/gmp-sources/widevinecdm.json" ARCH_PATTERNS = { "linux_x86_64": ["Linux_x86_64-gcc3"], - "linux_aarch64": ["Linux_aarch64-gcc3"] + "linux_aarch64": ["Linux_aarch64-gcc3"], } + def fetch_json(url: str) -> dict: with urllib.request.urlopen(url) as r: return json.load(r) + def hex_to_sri(hexstr: str) -> str: b = bytes.fromhex(hexstr) return "sha512-" + base64.b64encode(b).decode() + def find_widevine_vendor(data: dict) -> dict: vendors = data.get("vendors") or {} for key, value in vendors.items(): @@ -28,6 +31,7 @@ def find_widevine_vendor(data: dict) -> dict: raise SystemExit("ERR: couldn't find a widevine vendor entry in JSON !") + def judge_platforms(platforms: dict, patterns) -> Optional[Tuple[str, dict]]: for plkey, plvalue in platforms.items(): if plvalue.get("alias"): @@ -40,6 +44,7 @@ def judge_platforms(platforms: dict, patterns) -> Optional[Tuple[str, dict]]: return None + def normalize_fileurl(entry: dict) -> Optional[str]: if entry.get("fileUrl"): return entry["fileUrl"] @@ -50,13 +55,15 @@ def normalize_fileurl(entry: dict) -> Optional[str]: return None + def extract_ver_from_url(url: str) -> Optional[str]: - m = re.search(r'[_-](\d+\.\d+\.\d+\.\d+)[_-]', url) + m = re.search(r"[_-](\d+\.\d+\.\d+\.\d+)[_-]", url) if m: return m.group(1) - m = re.search(r'(\d+\.\d+\.\d+\.\d+)', url) + m = re.search(r"(\d+\.\d+\.\d+\.\d+)", url) return m.group(1) if m else None + def build_entry(pkey: str, pentry: dict) -> dict: url = normalize_fileurl(pentry) hv = pentry.get("hashValue") @@ -68,6 +75,7 @@ def build_entry(pkey: str, pentry: dict) -> dict: version = extract_ver_from_url(url) return {"platform_key": pkey, "url": url, "sri": sri, "version": version} + def main(): WIDEVINE_DIR = Path(__file__).resolve().parent DEFAULT_FILE = WIDEVINE_DIR / "x86_64-manifest.json" @@ -80,7 +88,9 @@ def main(): print(f"# fetching {args.json_url} !", file=sys.stderr) data = fetch_json(args.json_url) vendor = find_widevine_vendor(data) - platforms = vendor.get("platforms") or {} # should never be null but moz could forseeably delete it + platforms = ( + vendor.get("platforms") or {} 
+ ) # should never be null but moz could forseeably delete it if not platforms: raise SystemExit("ERR: no widevine platforms !") @@ -103,5 +113,6 @@ def main(): args.file.write_text(json.dumps(linux_x64, indent=2) + "\n", encoding="utf-8") print("# updated", file=sys.stderr) + if __name__ == "__main__": main() diff --git a/pkgs/by-name/xb/xborders/setup.py b/pkgs/by-name/xb/xborders/setup.py index a002625fa064d..8d8cd92d77233 100644 --- a/pkgs/by-name/xb/xborders/setup.py +++ b/pkgs/by-name/xb/xborders/setup.py @@ -1,12 +1,12 @@ from setuptools import setup setup( - name='@pname@', - version='@version@', - author='deter0', - description='@desc@', - install_requires=['pycairo', 'requests', 'PyGObject'], + name="@pname@", + version="@version@", + author="deter0", + description="@desc@", + install_requires=["pycairo", "requests", "PyGObject"], scripts=[ - 'xborders', + "xborders", ], ) diff --git a/pkgs/by-name/xk/xkeysnail/emacs.py b/pkgs/by-name/xk/xkeysnail/emacs.py index b9f9639891560..e30011db5e4c1 100644 --- a/pkgs/by-name/xk/xkeysnail/emacs.py +++ b/pkgs/by-name/xk/xkeysnail/emacs.py @@ -3,46 +3,60 @@ from xkeysnail.transform import * aa = False + + def aaset(v): def _aaset(): transform._mark_set = False - global aa; aa = v + global aa + aa = v + return _aaset + + def aaif(): def _aaif(): - global aa; transform._mark_set = False - if aa: aa = False; return K("esc") + global aa + transform._mark_set = False + if aa: + aa = False + return K("esc") return K("enter") + return _aaif + + def aaflip(): def _aaflip(): - transform._mark_set = not transform._mark_set; + transform._mark_set = not transform._mark_set + return _aaflip -define_keymap(re.compile("Google-chrome|Chromium-browser|firefox"), { - K("C-b"): with_mark(K("left")), - K("C-f"): with_mark(K("right")), - K("C-p"): with_mark(K("up")), - K("C-n"): with_mark(K("down")), - K("M-b"): with_mark(K("C-left")), - K("M-f"): with_mark(K("C-right")), - K("C-a"): with_mark(K("home")), - K("C-e"): with_mark(K("end")), - - K("C-w"): [K("C-x"), set_mark(False)], - K("M-w"): [K("C-c"), K("right"), set_mark(False)], - K("C-y"): [K("C-v"), set_mark(False)], - K("C-k"): [K("Shift-end"), K("C-x"), set_mark(False)], - K("C-d"): [K("delete"), set_mark(False)], - K("M-d"): [K("C-delete"), set_mark(False)], - K("M-backspace"): [K("C-backspace"), set_mark(False)], - K("C-slash"): [K("C-z"), set_mark(False)], - - K("C-space"): aaflip(), - K("C-M-space"): with_or_set_mark(K("C-right")), - - K("enter"): aaif(), - K("C-s"): [K("F3"), aaset(True)], - K("C-r"): [K("Shift-F3"), aaset(True)], - K("C-g"): [K("esc"), aaset(False)] -}) + +define_keymap( + re.compile("Google-chrome|Chromium-browser|firefox"), + { + K("C-b"): with_mark(K("left")), + K("C-f"): with_mark(K("right")), + K("C-p"): with_mark(K("up")), + K("C-n"): with_mark(K("down")), + K("M-b"): with_mark(K("C-left")), + K("M-f"): with_mark(K("C-right")), + K("C-a"): with_mark(K("home")), + K("C-e"): with_mark(K("end")), + K("C-w"): [K("C-x"), set_mark(False)], + K("M-w"): [K("C-c"), K("right"), set_mark(False)], + K("C-y"): [K("C-v"), set_mark(False)], + K("C-k"): [K("Shift-end"), K("C-x"), set_mark(False)], + K("C-d"): [K("delete"), set_mark(False)], + K("M-d"): [K("C-delete"), set_mark(False)], + K("M-backspace"): [K("C-backspace"), set_mark(False)], + K("C-slash"): [K("C-z"), set_mark(False)], + K("C-space"): aaflip(), + K("C-M-space"): with_or_set_mark(K("C-right")), + K("enter"): aaif(), + K("C-s"): [K("F3"), aaset(True)], + K("C-r"): [K("Shift-F3"), aaset(True)], + K("C-g"): [K("esc"), 
aaset(False)], + }, +) diff --git a/pkgs/by-name/ya/yandex-cloud/update.py b/pkgs/by-name/ya/yandex-cloud/update.py index d6309118bf923..b3de604725636 100644 --- a/pkgs/by-name/ya/yandex-cloud/update.py +++ b/pkgs/by-name/ya/yandex-cloud/update.py @@ -30,24 +30,29 @@ def to_goarch(cpu): nixpkgs_path = "." attr_path = os.getenv("UPDATE_NIX_ATTR_PATH", "yandex-cloud") -package_attrs = json.loads(subprocess.run( - [ - "nix", - "--extra-experimental-features", "nix-command", - "eval", - "--json", - "--file", nixpkgs_path, - "--apply", """p: { +package_attrs = json.loads( + subprocess.run( + [ + "nix", + "--extra-experimental-features", + "nix-command", + "eval", + "--json", + "--file", + nixpkgs_path, + "--apply", + """p: { dir = dirOf p.meta.position; version = p.version; }""", - "--", - attr_path, - ], - stdout=subprocess.PIPE, - text=True, - check=True, -).stdout) + "--", + attr_path, + ], + stdout=subprocess.PIPE, + text=True, + check=True, + ).stdout +) old_version = package_attrs["version"] new_version = requests.get(f"{storage_url}/release/stable").text.rstrip() @@ -66,7 +71,8 @@ def to_goarch(cpu): nix_hash = subprocess.run( [ "nix-prefetch-url", - "--type", "sha256", + "--type", + "sha256", url, ], stdout=subprocess.PIPE, @@ -77,10 +83,12 @@ def to_goarch(cpu): sri_hash = subprocess.run( [ "nix", - "--extra-experimental-features", "nix-command", + "--extra-experimental-features", + "nix-command", "hash", "to-sri", - "--type", "sha256", + "--type", + "sha256", "--", nix_hash, ], @@ -96,10 +104,16 @@ def to_goarch(cpu): package_dir = package_attrs["dir"] file_path = os.path.join(package_dir, "sources.json") -file_content = json.dumps({ - "version": new_version, - "binaries": binaries, -}, indent=2) + "\n" +file_content = ( + json.dumps( + { + "version": new_version, + "binaries": binaries, + }, + indent=2, + ) + + "\n" +) with tempfile.NamedTemporaryFile(mode="w") as t: t.write(file_content) diff --git a/pkgs/by-name/ya/yazi/plugins/update.py b/pkgs/by-name/ya/yazi/plugins/update.py index 2a6cbfbafc6fb..1059adfd62ad8 100755 --- a/pkgs/by-name/ya/yazi/plugins/update.py +++ b/pkgs/by-name/ya/yazi/plugins/update.py @@ -27,13 +27,14 @@ def run_command(cmd: str, capture_output: bool = True) -> str: def get_plugin_info(nixpkgs_dir: str, plugin_name: str) -> dict[str, str]: """Get plugin repository information from Nix""" - owner = run_command(f"nix eval --raw -f {nixpkgs_dir} yaziPlugins.\"{plugin_name}\".src.owner") - repo = run_command(f"nix eval --raw -f {nixpkgs_dir} yaziPlugins.\"{plugin_name}\".src.repo") + owner = run_command( + f'nix eval --raw -f {nixpkgs_dir} yaziPlugins."{plugin_name}".src.owner' + ) + repo = run_command( + f'nix eval --raw -f {nixpkgs_dir} yaziPlugins."{plugin_name}".src.repo' + ) - return { - "owner": owner, - "repo": repo - } + return {"owner": owner, "repo": repo} def get_yazi_version(nixpkgs_dir: str) -> str: @@ -41,7 +42,6 @@ def get_yazi_version(nixpkgs_dir: str) -> str: return run_command(f"nix eval --raw -f {nixpkgs_dir} yazi-unwrapped.version") - def get_github_headers() -> dict[str, str]: """Create headers for GitHub API requests""" headers = {"Accept": "application/vnd.github.v3+json"} @@ -65,7 +65,10 @@ def get_default_branch(owner: str, repo: str, headers: dict[str, str]) -> str: print("Falling back to 'main' as default branch") return "main" -def fetch_plugin_content(owner: str, repo: str, plugin_pname: str, headers: dict[str, str]) -> str: + +def fetch_plugin_content( + owner: str, repo: str, plugin_pname: str, headers: dict[str, str] +) -> str: 
"""Fetch the plugin's main.lua content from GitHub""" default_branch = get_default_branch(owner, repo, headers) plugin_path = f"{plugin_pname}/" if owner == "yazi-rs" else "" @@ -79,13 +82,21 @@ def fetch_plugin_content(owner: str, repo: str, plugin_pname: str, headers: dict raise RuntimeError(f"Error fetching plugin content: {e}") -def check_version_compatibility(plugin_content: str, plugin_name: str, yazi_version: str) -> str: +def check_version_compatibility( + plugin_content: str, plugin_name: str, yazi_version: str +) -> str: """Check if the plugin is compatible with the current Yazi version""" - required_version_match = re.search(r"since ([0-9.]+)", plugin_content.split("\n")[0]) - required_version = required_version_match.group(1) if required_version_match else "0" + required_version_match = re.search( + r"since ([0-9.]+)", plugin_content.split("\n")[0] + ) + required_version = ( + required_version_match.group(1) if required_version_match else "0" + ) if required_version == "0": - print(f"No version requirement found for {plugin_name}, assuming compatible with any Yazi version") + print( + f"No version requirement found for {plugin_name}, assuming compatible with any Yazi version" + ) else: if version.parse(required_version) > version.parse(yazi_version): message = f"{plugin_name} plugin requires Yazi {required_version}, but we have {yazi_version}" @@ -95,7 +106,9 @@ def check_version_compatibility(plugin_content: str, plugin_name: str, yazi_vers return required_version -def get_latest_commit(owner: str, repo: str, plugin_pname: str, headers: dict[str, str]) -> tuple[str, str]: +def get_latest_commit( + owner: str, repo: str, plugin_pname: str, headers: dict[str, str] +) -> tuple[str, str]: """Get the latest commit hash and date for the plugin""" default_branch = get_default_branch(owner, repo, headers) @@ -104,7 +117,9 @@ def get_latest_commit(owner: str, repo: str, plugin_pname: str, headers: dict[st api_url = f"https://api.github.com/repos/{owner}/{repo}/commits?path={plugin_pname}/main.lua&per_page=1" else: # For third-party plugins, get latest commit on default branch - api_url = f"https://api.github.com/repos/{owner}/{repo}/commits/{default_branch}" + api_url = ( + f"https://api.github.com/repos/{owner}/{repo}/commits/{default_branch}" + ) try: response = requests.get(api_url, headers=headers) @@ -131,15 +146,25 @@ def calculate_sri_hash(owner: str, repo: str, latest_commit: str) -> str: prefetch_url = f"https://github.com/{owner}/{repo}/archive/{latest_commit}.tar.gz" try: - new_hash = run_command(f"nix-prefetch-url --unpack --type sha256 {prefetch_url} 2>/dev/null") + new_hash = run_command( + f"nix-prefetch-url --unpack --type sha256 {prefetch_url} 2>/dev/null" + ) if not new_hash.startswith("sha256-"): - new_hash = run_command(f"nix --extra-experimental-features nix-command hash to-sri --type sha256 {new_hash} 2>/dev/null") + new_hash = run_command( + f"nix --extra-experimental-features nix-command hash to-sri --type sha256 {new_hash} 2>/dev/null" + ) if not new_hash.startswith("sha256-"): - print("Warning: Failed to get SRI hash directly, trying alternative method...") - raw_hash = run_command(f"nix-prefetch-url --type sha256 {prefetch_url} 2>/dev/null") - new_hash = run_command(f"nix --extra-experimental-features nix-command hash to-sri --type sha256 {raw_hash} 2>/dev/null") + print( + "Warning: Failed to get SRI hash directly, trying alternative method..." 
+ ) + raw_hash = run_command( + f"nix-prefetch-url --type sha256 {prefetch_url} 2>/dev/null" + ) + new_hash = run_command( + f"nix --extra-experimental-features nix-command hash to-sri --type sha256 {raw_hash} 2>/dev/null" + ) except Exception as e: raise RuntimeError(f"Error calculating hash: {e}") @@ -152,7 +177,7 @@ def calculate_sri_hash(owner: str, repo: str, latest_commit: str) -> str: def read_nix_file(file_path: str) -> str: """Read the content of a Nix file""" try: - with open(file_path, 'r') as f: + with open(file_path, "r") as f: return f.read() except IOError as e: raise RuntimeError(f"Error reading file {file_path}: {e}") @@ -161,34 +186,51 @@ def read_nix_file(file_path: str) -> str: def write_nix_file(file_path: str, content: str) -> None: """Write content to a Nix file""" try: - with open(file_path, 'w') as f: + with open(file_path, "w") as f: f.write(content) except IOError as e: raise RuntimeError(f"Error writing to file {file_path}: {e}") -def update_nix_file(default_nix_path: str, latest_commit: str, new_version: str, new_hash: str) -> None: +def update_nix_file( + default_nix_path: str, latest_commit: str, new_version: str, new_hash: str +) -> None: """Update the default.nix file with new version, revision and hash""" default_nix_content = read_nix_file(default_nix_path) - default_nix_content = re.sub(r'rev = "[^"]*"', f'rev = "{latest_commit}"', default_nix_content) + default_nix_content = re.sub( + r'rev = "[^"]*"', f'rev = "{latest_commit}"', default_nix_content + ) if 'version = "' in default_nix_content: - default_nix_content = re.sub(r'version = "[^"]*"', f'version = "{new_version}"', default_nix_content) + default_nix_content = re.sub( + r'version = "[^"]*"', f'version = "{new_version}"', default_nix_content + ) else: - default_nix_content = re.sub(r'(pname = "[^"]*";)', f'\\1\n version = "{new_version}";', default_nix_content) + default_nix_content = re.sub( + r'(pname = "[^"]*";)', + f'\\1\n version = "{new_version}";', + default_nix_content, + ) if 'hash = "' in default_nix_content: - default_nix_content = re.sub(r'hash = "[^"]*"', f'hash = "{new_hash}"', default_nix_content) - elif 'fetchFromGitHub' in default_nix_content: - default_nix_content = re.sub(r'sha256 = "[^"]*"', f'sha256 = "{new_hash}"', default_nix_content) + default_nix_content = re.sub( + r'hash = "[^"]*"', f'hash = "{new_hash}"', default_nix_content + ) + elif "fetchFromGitHub" in default_nix_content: + default_nix_content = re.sub( + r'sha256 = "[^"]*"', f'sha256 = "{new_hash}"', default_nix_content + ) else: raise RuntimeError(f"Could not find hash attribute in {default_nix_path}") write_nix_file(default_nix_path, default_nix_content) updated_content = read_nix_file(default_nix_path) - if f'hash = "{new_hash}"' in updated_content or f'sha256 = "{new_hash}"' in updated_content: + if ( + f'hash = "{new_hash}"' in updated_content + or f'sha256 = "{new_hash}"' in updated_content + ): print(f"Successfully updated hash to: {new_hash}") else: raise RuntimeError(f"Failed to update hash in {default_nix_path}") @@ -197,20 +239,32 @@ def update_nix_file(default_nix_path: str, latest_commit: str, new_version: str, def get_all_plugins(nixpkgs_dir: str) -> list[dict[str, str]]: """Get all available Yazi plugins from the Nix expression""" try: - plugin_names_json = run_command(f'nix eval --impure --json --expr "builtins.attrNames (import {nixpkgs_dir} {{}}).yaziPlugins"') + plugin_names_json = run_command( + f'nix eval --impure --json --expr "builtins.attrNames (import {nixpkgs_dir} {{}}).yaziPlugins"' + 
) plugin_names = json.loads(plugin_names_json) - excluded_attrs = ["mkYaziPlugin", "override", "overrideDerivation", "overrideAttrs", "recurseForDerivations"] + excluded_attrs = [ + "mkYaziPlugin", + "override", + "overrideDerivation", + "overrideAttrs", + "recurseForDerivations", + ] plugin_names = [name for name in plugin_names if name not in excluded_attrs] plugins = [] for name in plugin_names: try: - pname = run_command(f'nix eval --raw -f {nixpkgs_dir} "yaziPlugins.{name}.pname"') - plugins.append({ - "name": name, # Attribute name in yaziPlugins set - "pname": pname # Package name (used in repo paths) - }) + pname = run_command( + f'nix eval --raw -f {nixpkgs_dir} "yaziPlugins.{name}.pname"' + ) + plugins.append( + { + "name": name, # Attribute name in yaziPlugins set + "pname": pname, # Package name (used in repo paths) + } + ) except Exception as e: print(f"Warning: Could not get pname for plugin {name}, skipping: {e}") continue @@ -220,7 +274,9 @@ def get_all_plugins(nixpkgs_dir: str) -> list[dict[str, str]]: raise RuntimeError(f"Error getting plugin list: {e}") -def validate_environment(plugin_name: str | None = None, plugin_pname: str | None = None) -> tuple[str, str | None, str | None]: +def validate_environment( + plugin_name: str | None = None, plugin_pname: str | None = None +) -> tuple[str, str | None, str | None]: """Validate environment variables and paths""" nixpkgs_dir = os.getcwd() @@ -230,12 +286,16 @@ def validate_environment(plugin_name: str | None = None, plugin_pname: str | Non if plugin_name: plugin_dir = f"{nixpkgs_dir}/pkgs/by-name/ya/yazi/plugins/{plugin_name}" if not Path(f"{plugin_dir}/default.nix").exists(): - raise RuntimeError(f"Could not find default.nix for plugin {plugin_name} at {plugin_dir}") + raise RuntimeError( + f"Could not find default.nix for plugin {plugin_name} at {plugin_dir}" + ) return nixpkgs_dir, plugin_name, plugin_pname -def update_single_plugin(nixpkgs_dir: str, plugin_name: str, plugin_pname: str) -> dict[str, str] | None: +def update_single_plugin( + nixpkgs_dir: str, plugin_name: str, plugin_pname: str +) -> dict[str, str] | None: """Update a single Yazi plugin Returns: @@ -259,7 +319,9 @@ def update_single_plugin(nixpkgs_dir: str, plugin_name: str, plugin_pname: str) headers = get_github_headers() plugin_content = fetch_plugin_content(owner, repo, plugin_pname, headers) - required_version = check_version_compatibility(plugin_content, plugin_name, yazi_version) + required_version = check_version_compatibility( + plugin_content, plugin_name, yazi_version + ) latest_commit, commit_date = get_latest_commit(owner, repo, plugin_pname, headers) print(f"Checking {plugin_name} latest commit {latest_commit} ({commit_date})") @@ -284,7 +346,7 @@ def update_single_plugin(nixpkgs_dir: str, plugin_name: str, plugin_pname: str) "old_version": old_version, "new_version": new_version, "old_commit": old_commit, - "new_commit": latest_commit + "new_commit": latest_commit, } @@ -317,7 +379,9 @@ def update_all_plugins(nixpkgs_dir: str) -> list[dict[str, str]]: print(f"{'=' * 50}") try: - update_info = update_single_plugin(nixpkgs_dir, plugin_name, plugin_pname) + update_info = update_single_plugin( + nixpkgs_dir, plugin_name, plugin_pname + ) checked_count += 1 if update_info: @@ -336,12 +400,16 @@ def update_all_plugins(nixpkgs_dir: str) -> list[dict[str, str]]: continue print(f"\n{'=' * 50}") - print(f"Update summary: {updated_count} plugins updated out of {checked_count} checked") + print( + f"Update summary: {updated_count} plugins updated out of 
{checked_count} checked" + ) if updated_count > 0: print("\nUpdated plugins:") for plugin in updated_plugins: - print(f" - {plugin['name']}: {plugin['old_version']} → {plugin['new_version']}") + print( + f" - {plugin['name']}: {plugin['old_version']} → {plugin['new_version']}" + ) if failed_plugins: print(f"\nFailed to update {len(failed_plugins)} plugins:") @@ -370,7 +438,7 @@ def commit_changes(updated_plugins: list[dict[str, str]]) -> None: commit_message = f"yaziPlugins.{plugin['name']}: update from {plugin['old_version']} to {plugin['new_version']}" else: commit_message = f"yaziPlugins: update on {current_date}\n\n" - for plugin in sorted(updated_plugins, key=lambda x: x['name']): + for plugin in sorted(updated_plugins, key=lambda x: x["name"]): commit_message += f"- {plugin['name']}: {plugin['old_version']} → {plugin['new_version']}\n" run_command("git add pkgs/by-name/ya/yazi/plugins/", capture_output=False) @@ -388,7 +456,9 @@ def main(): group = parser.add_mutually_exclusive_group() group.add_argument("--all", action="store_true", help="Update all Yazi plugins") group.add_argument("--plugin", type=str, help="Update a specific plugin by name") - parser.add_argument("--commit", action="store_true", help="Commit changes after updating") + parser.add_argument( + "--commit", action="store_true", help="Commit changes after updating" + ) args = parser.parse_args() nixpkgs_dir = os.getcwd() @@ -401,7 +471,9 @@ def main(): elif args.plugin: plugin_name = args.plugin try: - plugin_pname = run_command(f'nix eval --raw -f {nixpkgs_dir} "yaziPlugins.{plugin_name}.pname"') + plugin_pname = run_command( + f'nix eval --raw -f {nixpkgs_dir} "yaziPlugins.{plugin_name}.pname"' + ) print(f"Updating Yazi plugin: {plugin_name}") update_info = update_single_plugin(nixpkgs_dir, plugin_name, plugin_pname) if update_info: diff --git a/pkgs/data/fonts/maple-font/update.py b/pkgs/data/fonts/maple-font/update.py index 1d3b2f87b4cdc..e4b41cb126c9a 100644 --- a/pkgs/data/fonts/maple-font/update.py +++ b/pkgs/data/fonts/maple-font/update.py @@ -6,10 +6,11 @@ import requests from urllib.parse import urlparse + def process_github_release(url, token=None): parsed = urlparse(url) - path_parts = parsed.path.strip('/').split('/') - if len(path_parts) < 5 or parsed.netloc != 'github.com': + path_parts = parsed.path.strip("/").split("/") + if len(path_parts) < 5 or parsed.netloc != "github.com": raise ValueError("Invalid GitHub release URL format") owner, repo, _, _, tag = path_parts[:5] headers = {"Accept": "application/vnd.github.v3+json"} @@ -17,18 +18,20 @@ def process_github_release(url, token=None): headers["Authorization"] = f"Bearer {token}" response = requests.get( f"https://api.github.com/repos/{owner}/{repo}/releases/tags/{tag}", - headers=headers + headers=headers, ) if response.status_code != 200: - raise RuntimeError(f"Failed to fetch release info: {response.status_code} ({response.json().get('message')})") + raise RuntimeError( + f"Failed to fetch release info: {response.status_code} ({response.json().get('message')})" + ) release_data = response.json() - assets = release_data.get('assets', []) + assets = release_data.get("assets", []) result = {} sha256_pattern = re.compile(r"^[a-fA-F0-9]{64}$") for asset in assets: - if not asset['name'].endswith('.sha256'): + if not asset["name"].endswith(".sha256"): continue - download_url = asset['browser_download_url'] + download_url = asset["browser_download_url"] content_response = requests.get(download_url, 
headers=headers) if content_response.status_code != 200: raise RuntimeError( @@ -40,19 +43,21 @@ def process_github_release(url, token=None): raise ValueError(f"Invalid SHA256 format in {asset['name']}") try: byte_data = bytes.fromhex(hex_hash) - base64_hash = base64.b64encode(byte_data).decode('utf-8') + base64_hash = base64.b64encode(byte_data).decode("utf-8") except Exception as e: raise RuntimeError(f"Error processing {asset['name']}: {str(e)}") - filename = asset['name'][:-7] + filename = asset["name"][:-7] result[filename] = f"sha256-{base64_hash}" output_file = f"{repo}_{tag}_hashes.json" - with open(output_file, 'w', encoding='utf-8') as f: + with open(output_file, "w", encoding="utf-8") as f: json.dump(result, f, indent=2, ensure_ascii=False) print(f"Successfully generated {output_file}") + + if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Process GitHub release hashes') - parser.add_argument('url', help='GitHub release URL') - parser.add_argument('-t', '--token', help='GitHub API token (optional)') + parser = argparse.ArgumentParser(description="Process GitHub release hashes") + parser.add_argument("url", help="GitHub release URL") + parser.add_argument("-t", "--token", help="GitHub API token (optional)") args = parser.parse_args() try: process_github_release(args.url, args.token) diff --git a/pkgs/data/fonts/nerd-fonts/update.py b/pkgs/data/fonts/nerd-fonts/update.py index e070d1ceebe03..f7b990e177670 100755 --- a/pkgs/data/fonts/nerd-fonts/update.py +++ b/pkgs/data/fonts/nerd-fonts/update.py @@ -8,7 +8,7 @@ if not all( f"UPDATE_NIX_{v}" in os.environ for v in ["NAME", "PNAME", "OLD_VERSION", "ATTR_PATH"] -) or not os.environ['UPDATE_NIX_ATTR_PATH'].startswith("nerd-fonts."): +) or not os.environ["UPDATE_NIX_ATTR_PATH"].startswith("nerd-fonts."): raise Exception( "Please don't run this script manually, only with:\n" "nix-shell maintainers/scripts/update.nix --argstr path nerd-fonts " @@ -17,31 +17,34 @@ RELEASE_INFO_URL = "https://api.github.com/repos/ryanoasis/nerd-fonts/releases/latest" FONTS_INFO_URL_TEMPLATE = "https://raw.githubusercontent.com/ryanoasis/nerd-fonts/refs/tags/{}/bin/scripts/lib/fonts.json" -SHA256_URL_TEMPLATE = "https://github.com/ryanoasis/nerd-fonts/releases/download/{}/SHA-256.txt" +SHA256_URL_TEMPLATE = ( + "https://github.com/ryanoasis/nerd-fonts/releases/download/{}/SHA-256.txt" +) RELEASE_INFO_FILENAME = "release.json" FONTS_INFO_FILENAME = "fonts.json" CHECKSUMS_FILENAME = "checksums.json" + def fetchjson(url): with ureq.urlopen(url) as r: return json.loads(r.read()) + def storejson(path, obj): with open(path, "w", encoding="utf-8") as f: json.dump(obj, f, indent=2, ensure_ascii=False) # Needed to satisfy EditorConfig's rules - f.write('\n') + f.write("\n") + def slicedict(d, ks): return {k: html.unescape(d[k]) for k in ks} + os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), "manifests")) -release_info = slicedict( - fetchjson(RELEASE_INFO_URL), - ["tag_name"] -) +release_info = slicedict(fetchjson(RELEASE_INFO_URL), ["tag_name"]) tag_name = release_info["tag_name"] with open(RELEASE_INFO_FILENAME, "r", encoding="utf-8") as f: @@ -49,16 +52,18 @@ def slicedict(d, ks): if tag_name == former_tag_name: raise Exception("no newer version available") # See: https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md#supported-features -print(json.dumps( - [ - { - "attrPath": "nerd-fonts", - "oldVersion": former_tag_name.removeprefix("v"), - "newVersion": 
tag_name.removeprefix("v"), - }, - ], - indent=2 -)) +print( + json.dumps( + [ + { + "attrPath": "nerd-fonts", + "oldVersion": former_tag_name.removeprefix("v"), + "newVersion": tag_name.removeprefix("v"), + }, + ], + indent=2, + ) +) storejson(RELEASE_INFO_FILENAME, release_info) @@ -74,7 +79,7 @@ def slicedict(d, ks): "licenseId", "patchedName", "version", - ] + ], ) for item in fetchjson(FONTS_INFO_URL_TEMPLATE.format(tag_name))["fonts"] ], @@ -85,7 +90,7 @@ def slicedict(d, ks): { filename: sha256 for row in ureq.urlopen(SHA256_URL_TEMPLATE.format(tag_name)) - for sha256, filename in [row.decode('utf-8').split()] + for sha256, filename in [row.decode("utf-8").split()] if filename.endswith(".tar.xz") }, ) diff --git a/pkgs/desktops/gnome/extensions/update-extensions.py b/pkgs/desktops/gnome/extensions/update-extensions.py index 2d1dd106c09bd..0065bd74d2b21 100755 --- a/pkgs/desktops/gnome/extensions/update-extensions.py +++ b/pkgs/desktops/gnome/extensions/update-extensions.py @@ -55,9 +55,7 @@ def fetch_extension_data(uuid: str, version: str) -> tuple[str, str]: # The download URLs follow this schema uuid = uuid.replace("@", "") - url: str = ( - f"https://extensions.gnome.org/extension-data/{uuid}.v{version}.shell-extension.zip" - ) + url: str = f"https://extensions.gnome.org/extension-data/{uuid}.v{version}.shell-extension.zip" # Download extension and add the zip content to nix-store for _ in range(0, 10): @@ -211,7 +209,9 @@ def process_extension(extension: dict[str, Any]) -> dict[str, Any] | None: k: v["version"] for k, v in extension["shell_version_map"].items() } # Transform shell_version_map to be more useful for us. Also throw away unwanted versions - shell_version_map: dict[ShellVersion, dict[str, str]] = generate_extension_versions(shell_version_map, uuid) # type: ignore + shell_version_map: dict[ShellVersion, dict[str, str]] = generate_extension_versions( + shell_version_map, uuid + ) # type: ignore # No compatible versions found if not shell_version_map: @@ -263,7 +263,6 @@ def scrape_extensions_index() -> list[dict[str, Any]]: page += 1 logging.info("Scraping page " + str(page)) try: - with request( f"https://extensions.gnome.org/extension-query/?n_per_page=25&page={page}" ) as response: diff --git a/pkgs/desktops/gnome/find-latest-version.py b/pkgs/desktops/gnome/find-latest-version.py index 06874d0476141..79ba5c35550f5 100644 --- a/pkgs/desktops/gnome/find-latest-version.py +++ b/pkgs/desktops/gnome/find-latest-version.py @@ -114,7 +114,9 @@ def odd_unstable(version: Version) -> Stability: return Stability.STABLE even = version_parts[1] % 2 == 0 - prerelease = (version_parts[1] >= 90 and version_parts[1] < 100) or (version_parts[1] >= 900 and version_parts[1] < 1000) + prerelease = (version_parts[1] >= 90 and version_parts[1] < 100) or ( + version_parts[1] >= 900 and version_parts[1] < 1000 + ) stable = even and not prerelease if stable: return Stability.STABLE @@ -180,7 +182,9 @@ def tagged(version: Version) -> Stability: >>> tagged(Version("3.2.1rc.3")) Stability.UNSTABLE """ - prerelease = "alpha" in version.value or "beta" in version.value or "rc" in version.value + prerelease = ( + "alpha" in version.value or "beta" in version.value or "rc" in version.value + ) if prerelease: return Stability.UNSTABLE else: @@ -221,12 +225,19 @@ def make_version_policy( if not upper_bound: return lambda version: selected.allows(version_classifier(version)) else: - return lambda version: selected.allows(version_classifier(version)) and version < upper_bound + return ( + lambda 
version: selected.allows(version_classifier(version)) + and version < upper_bound + ) def find_versions(package_name: str, version_policy: VersionPolicy) -> List[Version]: # The structure of cache.json: https://gitlab.gnome.org/Infrastructure/sysadmin-bin/blob/master/ftpadmin#L762 - cache = json.loads(requests.get(f"https://download.gnome.org/sources/{package_name}/cache.json").text) + cache = json.loads( + requests.get( + f"https://download.gnome.org/sources/{package_name}/cache.json" + ).text + ) if type(cache) != list or cache[0] != 4: raise Exception("Unknown format of cache.json file.") @@ -272,8 +283,12 @@ def find_versions(package_name: str, version_policy: VersionPolicy) -> List[Vers upper_bound = getattr(args, "upper-bound") if upper_bound is not None: upper_bound = Version(upper_bound) - version_policy_kind = arg_to_enum(VersionPolicyKind, getattr(args, "version-policy")) - version_policy = make_version_policy(version_policy_kind, requested_release, upper_bound) + version_policy_kind = arg_to_enum( + VersionPolicyKind, getattr(args, "version-policy") + ) + version_policy = make_version_policy( + version_policy_kind, requested_release, upper_bound + ) try: versions = find_versions(package_name, version_policy) diff --git a/pkgs/development/compilers/llvm/update-git.py b/pkgs/development/compilers/llvm/update-git.py index 9db47fac00dbd..3b430614d69de 100755 --- a/pkgs/development/compilers/llvm/update-git.py +++ b/pkgs/development/compilers/llvm/update-git.py @@ -14,26 +14,26 @@ from urllib.request import urlopen, Request -DEFAULT_NIX = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'default.nix') +DEFAULT_NIX = os.path.join(os.path.dirname(os.path.abspath(__file__)), "default.nix") def get_latest_chromium_build(): - RELEASES_URL = 'https://versionhistory.googleapis.com/v1/chrome/platforms/linux/channels/dev/versions/all/releases?filter=endtime=none&order_by=version%20desc' - print(f'GET {RELEASES_URL}') + RELEASES_URL = "https://versionhistory.googleapis.com/v1/chrome/platforms/linux/channels/dev/versions/all/releases?filter=endtime=none&order_by=version%20desc" + print(f"GET {RELEASES_URL}") with urlopen(RELEASES_URL) as resp: - return json.load(resp)['releases'][0] + return json.load(resp)["releases"][0] def get_file_revision(revision, file_path): """Fetches the requested Git revision of the given Chromium file.""" - url = f'https://raw.githubusercontent.com/chromium/chromium/{revision}/{file_path}' + url = f"https://raw.githubusercontent.com/chromium/chromium/{revision}/{file_path}" with urlopen(url) as http_response: return http_response.read().decode() def get_commit(ref): - url = f'https://api.github.com/repos/llvm/llvm-project/commits/{ref}' - headers = {'Accept': 'application/vnd.github.v3+json'} + url = f"https://api.github.com/repos/llvm/llvm-project/commits/{ref}" + headers = {"Accept": "application/vnd.github.v3+json"} request = Request(url, headers=headers) with urlopen(request) as http_response: return json.loads(http_response.read().decode()) @@ -49,41 +49,61 @@ def get_current_revision(): sys.exit(1) -def nix_prefetch_url(url, algo='sha256'): +def nix_prefetch_url(url, algo="sha256"): """Prefetches the content of the given URL.""" - print(f'nix-prefetch-url {url}') - out = subprocess.check_output(['nix-prefetch-url', '--type', algo, '--unpack', url]) - return out.decode('utf-8').rstrip() + print(f"nix-prefetch-url {url}") + out = subprocess.check_output(["nix-prefetch-url", "--type", algo, "--unpack", url]) + return 
out.decode("utf-8").rstrip() chromium_build = get_latest_chromium_build() -chromium_version = chromium_build['version'] -print(f'chromiumDev version: {chromium_version}') -print('Getting LLVM commit...') -clang_update_script = get_file_revision(chromium_version, 'tools/clang/scripts/update.py') -clang_revision = re.search(r"^CLANG_REVISION = '(.+)'$", clang_update_script, re.MULTILINE).group(1) -clang_commit_short = re.search(r"llvmorg-[0-9]+-init-[0-9]+-g([0-9a-f]{8})", clang_revision).group(1) -release_version = re.search(r"^RELEASE_VERSION = '(.+)'$", clang_update_script, re.MULTILINE).group(1) +chromium_version = chromium_build["version"] +print(f"chromiumDev version: {chromium_version}") +print("Getting LLVM commit...") +clang_update_script = get_file_revision( + chromium_version, "tools/clang/scripts/update.py" +) +clang_revision = re.search( + r"^CLANG_REVISION = '(.+)'$", clang_update_script, re.MULTILINE +).group(1) +clang_commit_short = re.search( + r"llvmorg-[0-9]+-init-[0-9]+-g([0-9a-f]{8})", clang_revision +).group(1) +release_version = re.search( + r"^RELEASE_VERSION = '(.+)'$", clang_update_script, re.MULTILINE +).group(1) commit = get_commit(clang_commit_short) if get_current_revision() == commit["sha"]: - print('No new update available.') + print("No new update available.") sys.exit(0) -date = datetime.fromisoformat(commit['commit']['committer']['date'].rstrip('Z')).date().isoformat() -version = f'unstable-{date}' -print('Prefetching source tarball...') -hash = nix_prefetch_url(f'https://github.com/llvm/llvm-project/archive/{commit["sha"]}.tar.gz') -print('Updating default.nix...') +date = ( + datetime.fromisoformat(commit["commit"]["committer"]["date"].rstrip("Z")) + .date() + .isoformat() +) +version = f"unstable-{date}" +print("Prefetching source tarball...") +hash = nix_prefetch_url( + f"https://github.com/llvm/llvm-project/archive/{commit['sha']}.tar.gz" +) +print("Updating default.nix...") with fileinput.FileInput(DEFAULT_NIX, inplace=True) as f: for line in f: if match := re.search(r'^ rev-version = "unstable-(.+)";', line): - old_date = match.group(1) + old_date = match.group(1) result = line - result = re.sub(r'^ version = ".+";', f' version = "{release_version}";', result) - result = re.sub(r'^ rev = ".*";', f' rev = "{commit["sha"]}";', result) - result = re.sub(r'^ rev-version = ".+";', f' rev-version = "{version}";', result) + result = re.sub( + r'^ version = ".+";', f' version = "{release_version}";', result + ) + result = re.sub( + r'^ rev = ".*";', f' rev = "{commit["sha"]}";', result + ) + result = re.sub( + r'^ rev-version = ".+";', f' rev-version = "{version}";', result + ) result = re.sub(r'^ sha256 = ".+";', f' sha256 = "{hash}";', result) - print(result, end='') + print(result, end="") # Commit the result: commit_message = f"llvmPackages_git: {old_date} -> {date}" -subprocess.run(['git', 'add', DEFAULT_NIX], check=True) -subprocess.run(['git', 'commit', '--file=-'], input=commit_message.encode(), check=True) +subprocess.run(["git", "add", DEFAULT_NIX], check=True) +subprocess.run(["git", "commit", "--file=-"], input=commit_message.encode(), check=True) diff --git a/pkgs/development/compilers/semeru-bin/generate-sources.py b/pkgs/development/compilers/semeru-bin/generate-sources.py index b647fca7cf4ec..2f869633003c6 100755 --- a/pkgs/development/compilers/semeru-bin/generate-sources.py +++ b/pkgs/development/compilers/semeru-bin/generate-sources.py @@ -17,47 +17,69 @@ "arm": ("armv6l", "armv7l"), } + def get_sha256(url): 
resp = requests.get(url) if resp.status_code != 200: - print("error: could not fetch checksum from url {}: code {}".format(url, resp.status_code), file=sys.stderr) + print( + "error: could not fetch checksum from url {}: code {}".format( + url, resp.status_code + ), + file=sys.stderr, + ) sys.exit(1) return resp.text.strip().split(" ")[0] + def generate_sources(releases, feature_version, out): latest_version = None for release in releases: - if release["prerelease"]: continue - if not re.search("_openj9-", release["name"]): continue + if release["prerelease"]: + continue + if not re.search("_openj9-", release["name"]): + continue for asset in release["assets"]: - match = re.match("ibm-semeru-open-(?P<image_type>[a-z]*)_(?P<architecture>[a-z0-9]*)_(?P<os>[a-z]*)_(?:(?P<major1>[0-9]*)u(?P<security1>[0-9]*)b(?P<build1>[0-9]*)|(?P<major2>[0-9]*)\\.(?P<minor2>[0-9]*)\\.(?P<security2>[0-9]*)_(?P<build2>[0-9]*))_(?P<jvm_impl>[a-z0-9]*)-[0-9]*\\.[0-9]*\\.[0-9]\\.tar\\.gz$", asset["name"]) - - if not match: continue - if match["os"] not in oses: continue - if match["image_type"] not in types: continue - if match["jvm_impl"] not in impls: continue - if match["architecture"] not in arch_to_nixos: continue - - version = ".".join([ - match["major1"] or match["major2"], - match["minor2"] or "0", - match["security1"] or match["security2"] - ]) + match = re.match( + "ibm-semeru-open-(?P<image_type>[a-z]*)_(?P<architecture>[a-z0-9]*)_(?P<os>[a-z]*)_(?:(?P<major1>[0-9]*)u(?P<security1>[0-9]*)b(?P<build1>[0-9]*)|(?P<major2>[0-9]*)\\.(?P<minor2>[0-9]*)\\.(?P<security2>[0-9]*)_(?P<build2>[0-9]*))_(?P<jvm_impl>[a-z0-9]*)-[0-9]*\\.[0-9]*\\.[0-9]\\.tar\\.gz$", + asset["name"], + ) + + if not match: + continue + if match["os"] not in oses: + continue + if match["image_type"] not in types: + continue + if match["jvm_impl"] not in impls: + continue + if match["architecture"] not in arch_to_nixos: + continue + + version = ".".join( + [ + match["major1"] or match["major2"], + match["minor2"] or "0", + match["security1"] or match["security2"], + ] + ) build = match["build1"] or match["build2"] - if latest_version and latest_version != (version, build): continue + if latest_version and latest_version != (version, build): + continue latest_version = (version, build) arch_map = ( - out - .setdefault(match["jvm_impl"], {}) + out.setdefault(match["jvm_impl"], {}) .setdefault(match["os"], {}) .setdefault(match["image_type"], {}) - .setdefault(feature_version, { - "packageType": match["image_type"], - "vmType": match["jvm_impl"], - }) + .setdefault( + feature_version, + { + "packageType": match["image_type"], + "vmType": match["jvm_impl"], + }, + ) ) for nixos_arch in arch_to_nixos[match["architecture"]]: @@ -73,13 +95,20 @@ def generate_sources(releases, feature_version, out): out = {} for feature_version in feature_versions: - resp = requests.get(f"https://api.github.com/repos/ibmruntimes/semeru{feature_version}-binaries/releases") + resp = requests.get( + f"https://api.github.com/repos/ibmruntimes/semeru{feature_version}-binaries/releases" + ) if resp.status_code != 200: - print("error: could not fetch data for release {} (code {}) {}".format(feature_version, resp.status_code, resp.content), file=sys.stderr) + print( + "error: could not fetch data for release {} (code {}) {}".format( + feature_version, resp.status_code, resp.content + ), + file=sys.stderr, + ) sys.exit(1) generate_sources(resp.json(), f"openjdk{feature_version}", out) with open("sources.json", "w") as f: json.dump(out, f, indent=2, sort_keys=True) - f.write('\n') + f.write("\n") diff --git a/pkgs/development/compilers/temurin-bin/generate-sources.py b/pkgs/development/compilers/temurin-bin/generate-sources.py index 
0ebf923b1078a..6a486f1448ab8 100755 --- a/pkgs/development/compilers/temurin-bin/generate-sources.py +++ b/pkgs/development/compilers/temurin-bin/generate-sources.py @@ -19,31 +19,42 @@ "riscv64": ("riscv64",), } + def generate_sources(assets, feature_version, out): for asset in assets: binary = asset["binary"] - if binary["os"] not in oses: continue - if binary["image_type"] not in types: continue - if binary["jvm_impl"] not in impls: continue - if binary["heap_size"] != "normal": continue - if binary["architecture"] not in arch_to_nixos: continue + if binary["os"] not in oses: + continue + if binary["image_type"] not in types: + continue + if binary["jvm_impl"] not in impls: + continue + if binary["heap_size"] != "normal": + continue + if binary["architecture"] not in arch_to_nixos: + continue - version = ".".join(str(v) for v in [ - asset["version"]["major"], - asset["version"]["minor"], - asset["version"]["security"] - ]) + version = ".".join( + str(v) + for v in [ + asset["version"]["major"], + asset["version"]["minor"], + asset["version"]["security"], + ] + ) build = str(asset["version"]["build"]) arch_map = ( - out - .setdefault(binary["jvm_impl"], {}) + out.setdefault(binary["jvm_impl"], {}) .setdefault(binary["os"], {}) .setdefault(binary["image_type"], {}) - .setdefault(feature_version, { - "packageType": binary["image_type"], - "vmType": binary["jvm_impl"], - }) + .setdefault( + feature_version, + { + "packageType": binary["image_type"], + "vmType": binary["jvm_impl"], + }, + ) ) for nixos_arch in arch_to_nixos[binary["architecture"]]: @@ -60,14 +71,22 @@ def generate_sources(assets, feature_version, out): out = {} for feature_version in feature_versions: # Default user-agent is blocked by Azure WAF. - headers = {'user-agent': 'nixpkgs-temurin-generate-sources/1.0.0'} - resp = requests.get(f"https://api.adoptium.net/v3/assets/latest/{feature_version}/hotspot", headers=headers) + headers = {"user-agent": "nixpkgs-temurin-generate-sources/1.0.0"} + resp = requests.get( + f"https://api.adoptium.net/v3/assets/latest/{feature_version}/hotspot", + headers=headers, + ) if resp.status_code != 200: - print("error: could not fetch data for release {} (code {}) {}".format(feature_version, resp.status_code, resp.content), file=sys.stderr) + print( + "error: could not fetch data for release {} (code {}) {}".format( + feature_version, resp.status_code, resp.content + ), + file=sys.stderr, + ) sys.exit(1) generate_sources(resp.json(), f"openjdk{feature_version}", out) with open("sources.json", "w") as f: json.dump(out, f, indent=2, sort_keys=True) - f.write('\n') + f.write("\n") diff --git a/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts.py b/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts.py index 4713cfb7026e5..e7f8b907fbf91 100644 --- a/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts.py +++ b/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts.py @@ -4,16 +4,17 @@ import sys import os from typing import Dict, List, Set, Tuple + do_abort: bool = False packages: Dict[str, Dict[str, Dict[str, List[str]]]] = collections.defaultdict(dict) found_paths: Set[Path] = set() out_path: Path = Path(os.getenv("out")) version: Tuple[int, int] = sys.version_info -site_packages_path: str = f'lib/python{version[0]}.{version[1]}/site-packages' +site_packages_path: str = f"lib/python{version[0]}.{version[1]}/site-packages" def get_name(dist: PathDistribution) -> str: - return dist.metadata['name'].lower().replace('-', '_') + 
return dist.metadata["name"].lower().replace("-", "_") # pretty print a package @@ -25,9 +26,9 @@ def describe_package(dist: PathDistribution) -> str: def describe_parents(parents: List[str]) -> str: if not parents: return "" - return \ - f" dependency chain:\n " \ - + str(f"\n ...depending on: ".join(parents)) + return f" dependency chain:\n " + str( + f"\n ...depending on: ".join(parents) + ) # inserts an entry into 'packages' @@ -39,9 +40,11 @@ def add_entry(name: str, version: str, store_path: str, parents: List[str]) -> N # transitively discover python dependencies and store them in 'packages' -def find_packages(store_path: Path, site_packages_path: str, parents: List[str]) -> None: - site_packages: Path = (store_path / site_packages_path) - propagated_build_inputs: Path = (store_path / "nix-support/propagated-build-inputs") +def find_packages( + store_path: Path, site_packages_path: str, parents: List[str] +) -> None: + site_packages: Path = store_path / site_packages_path + propagated_build_inputs: Path = store_path / "nix-support/propagated-build-inputs" # only visit each path once, to avoid exponential complexity with highly # connected dependency graphs @@ -60,7 +63,9 @@ def find_packages(store_path: Path, site_packages_path: str, parents: List[str]) with open(propagated_build_inputs, "r") as f: build_inputs: List[str] = f.read().split() for build_input in build_inputs: - find_packages(Path(build_input), site_packages_path, parents + [build_input]) + find_packages( + Path(build_input), site_packages_path, parents + [build_input] + ) find_packages(out_path, site_packages_path, [f"this derivation: {out_path}"]) @@ -72,7 +77,7 @@ def find_packages(store_path: Path, site_packages_path: str, parents: List[str]) print("Found duplicated packages in closure for dependency '{}': ".format(name)) for store_path, candidate in store_paths.items(): print(f" {name} {candidate['version']} ({store_path})") - print(describe_parents(candidate['parents'])) + print(describe_parents(candidate["parents"])) # fail if duplicates were found if do_abort: diff --git a/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts_py2.py b/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts_py2.py index bb82900c65a92..2845f0e336e49 100644 --- a/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts_py2.py +++ b/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts_py2.py @@ -9,7 +9,7 @@ for req in pkg_resources.find_distributions(f): if req not in packages[req.project_name]: # some exceptions inside buildPythonPackage - if req.project_name in ['setuptools', 'pip', 'wheel']: + if req.project_name in ["setuptools", "pip", "wheel"]: continue packages[req.project_name].append(req) @@ -24,7 +24,8 @@ if do_abort: print("") print( - 'Package duplicates found in closure, see above. Usually this ' - 'happens if two packages depend on different version ' - 'of the same dependency.') + "Package duplicates found in closure, see above. Usually this " + "happens if two packages depend on different version " + "of the same dependency." + ) sys.exit(1) diff --git a/pkgs/development/interpreters/python/hooks/python-runtime-deps-check-hook.py b/pkgs/development/interpreters/python/hooks/python-runtime-deps-check-hook.py index 77a6f33d49dd4..0f95745f3eef4 100644 --- a/pkgs/development/interpreters/python/hooks/python-runtime-deps-check-hook.py +++ b/pkgs/development/interpreters/python/hooks/python-runtime-deps-check-hook.py @@ -7,7 +7,6 @@ violated version constraints. 
""" - import importlib.metadata import re import sys diff --git a/pkgs/development/interpreters/python/run_setup.py b/pkgs/development/interpreters/python/run_setup.py index e3a530eb0cb6a..06a7fad836f2c 100644 --- a/pkgs/development/interpreters/python/run_setup.py +++ b/pkgs/development/interpreters/python/run_setup.py @@ -3,6 +3,11 @@ import setuptools import tokenize -__file__='setup.py'; - -exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec')) +__file__ = "setup.py" +exec( + compile( + getattr(tokenize, "open", open)(__file__).read().replace("\\r\\n", "\\n"), + __file__, + "exec", + ) +) diff --git a/pkgs/development/interpreters/python/sitecustomize.py b/pkgs/development/interpreters/python/sitecustomize.py index c6924a8e93f04..80841a98699c6 100644 --- a/pkgs/development/interpreters/python/sitecustomize.py +++ b/pkgs/development/interpreters/python/sitecustomize.py @@ -12,26 +12,31 @@ Similarly, this module listens to the environment variable `NIX_PYTHONEXECUTABLE` and sets `sys.executable` to its value. """ + import site import sys import os import functools -paths = os.environ.pop('NIX_PYTHONPATH', None) +paths = os.environ.pop("NIX_PYTHONPATH", None) if paths: - functools.reduce(lambda k, p: site.addsitedir(p, k), paths.split(':'), site._init_pathinfo()) + functools.reduce( + lambda k, p: site.addsitedir(p, k), paths.split(":"), site._init_pathinfo() + ) # Check whether we are in a venv or virtualenv. # For Python 3 we check whether our `base_prefix` is different from our current `prefix`. # For Python 2 we check whether the non-standard `real_prefix` is set. # https://stackoverflow.com/questions/1871549/determine-if-python-is-running-inside-virtualenv -in_venv = (sys.version_info.major == 3 and sys.prefix != sys.base_prefix) or (sys.version_info.major == 2 and hasattr(sys, "real_prefix")) +in_venv = (sys.version_info.major == 3 and sys.prefix != sys.base_prefix) or ( + sys.version_info.major == 2 and hasattr(sys, "real_prefix") +) if not in_venv: - executable = os.environ.pop('NIX_PYTHONEXECUTABLE', None) - prefix = os.environ.pop('NIX_PYTHONPREFIX', None) + executable = os.environ.pop("NIX_PYTHONEXECUTABLE", None) + prefix = os.environ.pop("NIX_PYTHONPREFIX", None) - if 'PYTHONEXECUTABLE' not in os.environ and executable is not None: + if "PYTHONEXECUTABLE" not in os.environ and executable is not None: sys.executable = executable if prefix is not None: # Sysconfig does not like it when sys.prefix is set to None diff --git a/pkgs/development/interpreters/python/tests/test_environments/test_python.py b/pkgs/development/interpreters/python/tests/test_environments/test_python.py index 0302ae1a6f0bf..d3a8c6d6c9540 100644 --- a/pkgs/development/interpreters/python/tests/test_environments/test_python.py +++ b/pkgs/development/interpreters/python/tests/test_environments/test_python.py @@ -17,14 +17,13 @@ INTERPRETER = "@interpreter@" PYTHON_VERSION = "@pythonVersion@" -IS_VIRTUALENV = @is_virtualenv@ -IS_VENV = @is_venv@ -IS_NIXENV = @is_nixenv@ +IS_VIRTUALENV = "@is_virtualenv@" == "True" +IS_VENV = "@is_venv@" == "True" +IS_NIXENV = "@is_nixenv@" == "True" IS_PYPY = platform.python_implementation() == "PyPy" class TestCasePython(unittest.TestCase): - @unittest.skipIf(IS_PYPY, "Executable is incorrect and needs to be fixed.") def test_interpreter(self): self.assertEqual(sys.executable, INTERPRETER) @@ -37,18 +36,23 @@ def test_prefix(self): def test_site_prefix(self): self.assertTrue(sys.prefix in site.PREFIXES) - 
@unittest.skipIf(IS_PYPY or sys.version_info.major==2, "Python 2 does not have base_prefix") + @unittest.skipIf( + IS_PYPY or sys.version_info.major == 2, "Python 2 does not have base_prefix" + ) def test_base_prefix(self): if IS_VENV or IS_VIRTUALENV: self.assertNotEqual(sys.prefix, sys.base_prefix) else: self.assertEqual(sys.prefix, sys.base_prefix) if IS_NIXENV: - self.assertNotEqual(sys.base_prefix, sysconfig.get_config_var('prefix')) + self.assertNotEqual(sys.base_prefix, sysconfig.get_config_var("prefix")) else: - self.assertEqual(sys.base_prefix, sysconfig.get_config_var('prefix')) + self.assertEqual(sys.base_prefix, sysconfig.get_config_var("prefix")) - @unittest.skipIf(sys.version_info.major==3, "sys.real_prefix is only set by virtualenv in case of Python 2.") + @unittest.skipIf( + sys.version_info.major == 3, + "sys.real_prefix is only set by virtualenv in case of Python 2.", + ) def test_real_prefix(self): self.assertTrue(hasattr(sys, "real_prefix") == IS_VIRTUALENV) diff --git a/pkgs/development/interpreters/python/tests/test_nix_pythonprefix/typeddep/setup.py b/pkgs/development/interpreters/python/tests/test_nix_pythonprefix/typeddep/setup.py index 25bac69ea09a4..d71b667b8c5dc 100644 --- a/pkgs/development/interpreters/python/tests/test_nix_pythonprefix/typeddep/setup.py +++ b/pkgs/development/interpreters/python/tests/test_nix_pythonprefix/typeddep/setup.py @@ -1,18 +1,20 @@ from setuptools import setup -setup(**{ - 'name': 'typeddep', - 'version': '1.3.3.7', - 'description': 'Minimal repro to test mypy and site prefixes with Nix', - 'long_description': None, - 'author': 'adisbladis', - 'author_email': 'adisbladis@gmail.com', - 'maintainer': None, - 'maintainer_email': None, - 'url': None, - 'packages': ['typeddep'], - 'package_data': {'': ['*']}, - 'install_requires': [], - 'entry_points': {}, - 'python_requires': '>=3.7,<4.0', -}) +setup( + **{ + "name": "typeddep", + "version": "1.3.3.7", + "description": "Minimal repro to test mypy and site prefixes with Nix", + "long_description": None, + "author": "adisbladis", + "author_email": "adisbladis@gmail.com", + "maintainer": None, + "maintainer_email": None, + "url": None, + "packages": ["typeddep"], + "package_data": {"": ["*"]}, + "install_requires": [], + "entry_points": {}, + "python_requires": ">=3.7,<4.0", + } +) diff --git a/pkgs/development/julia-modules/python/dag.py b/pkgs/development/julia-modules/python/dag.py index 4cb775cd42cd4..c3968dc482634 100644 --- a/pkgs/development/julia-modules/python/dag.py +++ b/pkgs/development/julia-modules/python/dag.py @@ -1,9 +1,9 @@ - # This file based on a ChatGPT reponse for the following prompt: # "can you write code in python to build up a DAG representing # a dependency tree, and then a function that can return all the # dependencies of a given node?" + class Node: def __init__(self, name): self.name = name @@ -29,7 +29,9 @@ def add_dependency(self, node_name, dependency_name): raise ValueError(f"Node '{node_name}' does not exist in the graph.") if dependency_name not in self.nodes: - raise ValueError(f"Dependency '{dependency_name}' does not exist in the graph.") + raise ValueError( + f"Dependency '{dependency_name}' does not exist in the graph." 
+ ) self.nodes[node_name].dependencies.add(dependency_name) @@ -44,7 +46,7 @@ def traverse_dependencies(current_node): for dependency in current_node.dependencies: dependencies.add(dependency) if dependency in self.nodes: - traverse_dependencies(self.nodes[dependency]) + traverse_dependencies(self.nodes[dependency]) traverse_dependencies(node) return dependencies diff --git a/pkgs/development/julia-modules/python/dedup_overrides.py b/pkgs/development/julia-modules/python/dedup_overrides.py index d1a7337c598dd..f99264bbbea1a 100755 --- a/pkgs/development/julia-modules/python/dedup_overrides.py +++ b/pkgs/development/julia-modules/python/dedup_overrides.py @@ -1,4 +1,3 @@ - import json from pathlib import Path import sys @@ -8,7 +7,7 @@ out_path = Path(sys.argv[2]) with open(overrides_path, "r") as f: - overrides = json.loads(f.read()) + overrides = json.loads(f.read()) with open(out_path, "w") as f: - toml.dump(overrides, f) + toml.dump(overrides, f) diff --git a/pkgs/development/julia-modules/python/extract_artifacts.py b/pkgs/development/julia-modules/python/extract_artifacts.py index 642611e029c42..07046e1d5045e 100755 --- a/pkgs/development/julia-modules/python/extract_artifacts.py +++ b/pkgs/development/julia-modules/python/extract_artifacts.py @@ -1,4 +1,3 @@ - import json from pathlib import Path import multiprocessing @@ -13,44 +12,53 @@ # This should match the behavior of the default unpackPhase. # See https://github.com/NixOS/nixpkgs/blob/59fa082abdbf462515facc8800d517f5728c909d/pkgs/stdenv/generic/setup.sh#L1044 archive_extensions = [ - # xz extensions - ".tar.xz", - ".tar.lzma", - ".txz", - - # *.tar or *.tar.* - ".tar", - ".tar.Z", - ".tar.bz2", - ".tar.gz", - - # Other tar extensions - ".tgz", - ".tbz2", - ".tbz", - - ".zip" - ] - -def get_archive_derivation(uuid, artifact_name, url, sha256, closure_dependencies_dag, dependency_uuids, extra_libs, is_darwin): - depends_on = set() - if closure_dependencies_dag.has_node(uuid): - depends_on = set(closure_dependencies_dag.get_dependencies(uuid)).intersection(dependency_uuids) - - other_libs = extra_libs.get(uuid, []) - - if is_darwin: - fixup = f"""fixupPhase = let + # xz extensions + ".tar.xz", + ".tar.lzma", + ".txz", + # *.tar or *.tar.* + ".tar", + ".tar.Z", + ".tar.bz2", + ".tar.gz", + # Other tar extensions + ".tgz", + ".tbz2", + ".tbz", + ".zip", +] + + +def get_archive_derivation( + uuid, + artifact_name, + url, + sha256, + closure_dependencies_dag, + dependency_uuids, + extra_libs, + is_darwin, +): + depends_on = set() + if closure_dependencies_dag.has_node(uuid): + depends_on = set(closure_dependencies_dag.get_dependencies(uuid)).intersection( + dependency_uuids + ) + + other_libs = extra_libs.get(uuid, []) + + if is_darwin: + fixup = f"""fixupPhase = let libs = lib.concatMap (lib.mapAttrsToList (k: v: v.path)) [{" ".join(["uuid-" + x for x in depends_on])}]; in '' ''""" - else: - # We provide gcc.cc.lib by default in order to get some common libraries - # like libquadmath.so. A number of packages expect this to be available and - # will give linker errors if it isn't. - fixup = f"""fixupPhase = let + else: + # We provide gcc.cc.lib by default in order to get some common libraries + # like libquadmath.so. A number of packages expect this to be available and + # will give linker errors if it isn't. 
+ fixup = f"""fixupPhase = let libs = lib.concatMap (lib.mapAttrsToList (k: v: v.path)) [{" ".join(["uuid-" + x for x in depends_on])}]; in '' @@ -60,7 +68,7 @@ def get_archive_derivation(uuid, artifact_name, url, sha256, closure_dependencie patchelf --set-interpreter ${{glibc}}/lib/ld-linux-x86-64.so.2 {{}} \\; ''""" - return f"""stdenv.mkDerivation {{ + return f"""stdenv.mkDerivation {{ name = "{artifact_name}"; src = fetchurl {{ url = "{url}"; @@ -77,92 +85,127 @@ def get_archive_derivation(uuid, artifact_name, url, sha256, closure_dependencie {fixup}; }}""" + def get_plain_derivation(url, sha256): - return f"""fetchurl {{ + return f"""fetchurl {{ url = "{url}"; sha256 = "{sha256}"; }}""" -def process_item(args): - item, julia_path, extract_artifacts_script, closure_dependencies_dag, dependency_uuids, extra_libs, is_darwin = args - uuid, src = item - lines = [] - - artifacts = toml.loads(subprocess.check_output([julia_path, extract_artifacts_script, uuid, src]).decode()) - if not artifacts: - return f' uuid-{uuid} = {{}};\n' - - lines.append(f' uuid-{uuid} = {{') - - for artifact_name, details in artifacts.items(): - if len(details["download"]) == 0: - continue - download = details["download"][0] - url = download["url"] - sha256 = download["sha256"] - - git_tree_sha1 = details["git-tree-sha1"] - parsed_url = urlparse(url) - if any(parsed_url.path.endswith(x) for x in archive_extensions): - derivation = get_archive_derivation(uuid, artifact_name, url, sha256, closure_dependencies_dag, dependency_uuids, extra_libs, is_darwin) - else: - derivation = get_plain_derivation(url, sha256) - - lines.append(f""" "{artifact_name}" = {{ +def process_item(args): + ( + item, + julia_path, + extract_artifacts_script, + closure_dependencies_dag, + dependency_uuids, + extra_libs, + is_darwin, + ) = args + uuid, src = item + lines = [] + + artifacts = toml.loads( + subprocess.check_output( + [julia_path, extract_artifacts_script, uuid, src] + ).decode() + ) + if not artifacts: + return f" uuid-{uuid} = {{}};\n" + + lines.append(f" uuid-{uuid} = {{") + + for artifact_name, details in artifacts.items(): + if len(details["download"]) == 0: + continue + download = details["download"][0] + url = download["url"] + sha256 = download["sha256"] + + git_tree_sha1 = details["git-tree-sha1"] + + parsed_url = urlparse(url) + if any(parsed_url.path.endswith(x) for x in archive_extensions): + derivation = get_archive_derivation( + uuid, + artifact_name, + url, + sha256, + closure_dependencies_dag, + dependency_uuids, + extra_libs, + is_darwin, + ) + else: + derivation = get_plain_derivation(url, sha256) + + lines.append(f""" "{artifact_name}" = {{ sha1 = "{git_tree_sha1}"; path = {derivation}; }};\n""") - lines.append(' };\n') - - return "\n".join(lines) - -def main(): - dependencies_path = Path(sys.argv[1]) - closure_yaml_path = Path(sys.argv[2]) - julia_path = Path(sys.argv[3]) - extract_artifacts_script = Path(sys.argv[4]) - extra_libs = json.loads(sys.argv[5]) - is_darwin = json.loads(sys.argv[6]) - out_path = Path(sys.argv[7]) - - with open(dependencies_path, "r") as f: - dependencies = yaml.safe_load(f) - dependency_uuids = list(dependencies.keys()) # Convert dict_keys to list - - with open(closure_yaml_path, "r") as f: - # Build up a map of UUID -> closure information - closure_yaml_list = yaml.safe_load(f) or [] - closure_yaml = {} - for item in closure_yaml_list: - closure_yaml[item["uuid"]] = item - - # Build up a dependency graph of UUIDs - closure_dependencies_dag = dag.DAG() - for uuid, contents in 
closure_yaml.items(): - if contents.get("depends_on"): - closure_dependencies_dag.add_node(uuid, dependencies=contents["depends_on"].values()) - - with open(out_path, "w") as f: - if is_darwin: - f.write("{ lib, fetchurl, pkgs, stdenv }:\n\n") - else: - f.write("{ lib, fetchurl, gcc, glibc, pkgs, stdenv }:\n\n") + lines.append(" };\n") - f.write("rec {\n") + return "\n".join(lines) - with multiprocessing.Pool(10) as pool: - # Create args tuples for each item - process_args = [ - (item, julia_path, extract_artifacts_script, closure_dependencies_dag, dependency_uuids, extra_libs, is_darwin) - for item in dependencies.items() - ] - for s in pool.map(process_item, process_args): - f.write(s) - f.write(f""" +def main(): + dependencies_path = Path(sys.argv[1]) + closure_yaml_path = Path(sys.argv[2]) + julia_path = Path(sys.argv[3]) + extract_artifacts_script = Path(sys.argv[4]) + extra_libs = json.loads(sys.argv[5]) + is_darwin = json.loads(sys.argv[6]) + out_path = Path(sys.argv[7]) + + with open(dependencies_path, "r") as f: + dependencies = yaml.safe_load(f) + dependency_uuids = list(dependencies.keys()) # Convert dict_keys to list + + with open(closure_yaml_path, "r") as f: + # Build up a map of UUID -> closure information + closure_yaml_list = yaml.safe_load(f) or [] + closure_yaml = {} + for item in closure_yaml_list: + closure_yaml[item["uuid"]] = item + + # Build up a dependency graph of UUIDs + closure_dependencies_dag = dag.DAG() + for uuid, contents in closure_yaml.items(): + if contents.get("depends_on"): + closure_dependencies_dag.add_node( + uuid, dependencies=contents["depends_on"].values() + ) + + with open(out_path, "w") as f: + if is_darwin: + f.write("{ lib, fetchurl, pkgs, stdenv }:\n\n") + else: + f.write("{ lib, fetchurl, gcc, glibc, pkgs, stdenv }:\n\n") + + f.write("rec {\n") + + with multiprocessing.Pool(10) as pool: + # Create args tuples for each item + process_args = [ + ( + item, + julia_path, + extract_artifacts_script, + closure_dependencies_dag, + dependency_uuids, + extra_libs, + is_darwin, + ) + for item in dependencies.items() + ] + for s in pool.map(process_item, process_args): + f.write(s) + + f.write(f""" }}\n""") + if __name__ == "__main__": - main() + main() diff --git a/pkgs/development/julia-modules/python/find_package_implications.py b/pkgs/development/julia-modules/python/find_package_implications.py index f253d9c1e6268..137edec0a59ba 100644 --- a/pkgs/development/julia-modules/python/find_package_implications.py +++ b/pkgs/development/julia-modules/python/find_package_implications.py @@ -1,4 +1,3 @@ - import json import os from pathlib import Path @@ -12,13 +11,13 @@ package_implications = json.loads(package_implications_json) with open(dependencies_path) as f: - desired_packages = yaml.safe_load(f) or [] + desired_packages = yaml.safe_load(f) or [] extra_package_names = [] for pkg in desired_packages: - if pkg["name"] in package_implications: - extra_package_names.extend(package_implications[pkg["name"]]) + if pkg["name"] in package_implications: + extra_package_names.extend(package_implications[pkg["name"]]) if len(extra_package_names) > 0: - with open(out_path, "w") as f: - f.write("\n".join(extra_package_names)) + with open(out_path, "w") as f: + f.write("\n".join(extra_package_names)) diff --git a/pkgs/development/julia-modules/python/format_overrides.py b/pkgs/development/julia-modules/python/format_overrides.py index cc86ba3915287..79dbf479ba47d 100644 --- a/pkgs/development/julia-modules/python/format_overrides.py +++ 
b/pkgs/development/julia-modules/python/format_overrides.py @@ -1,4 +1,3 @@ - import json from pathlib import Path import sys @@ -8,15 +7,16 @@ out_path = Path(sys.argv[2]) with open(overrides_path, "r") as f: - overrides = json.loads(f.read()) + overrides = json.loads(f.read()) result = {} -for (uuid, artifacts) in overrides.items(): - if len(artifacts) == 0: continue +for uuid, artifacts in overrides.items(): + if len(artifacts) == 0: + continue - for (name, info) in artifacts.items(): - result[info["sha1"]] = info["path"] + for name, info in artifacts.items(): + result[info["sha1"]] = info["path"] with open(out_path, "w") as f: - toml.dump(result, f) + toml.dump(result, f) diff --git a/pkgs/development/julia-modules/python/minimal_registry.py b/pkgs/development/julia-modules/python/minimal_registry.py index ab33ac366ca86..0832075595d9f 100755 --- a/pkgs/development/julia-modules/python/minimal_registry.py +++ b/pkgs/development/julia-modules/python/minimal_registry.py @@ -1,4 +1,3 @@ - from collections import defaultdict import copy import json @@ -20,108 +19,123 @@ out_path = Path(sys.argv[5]) with open(desired_packages_path, "r") as f: - desired_packages = yaml.safe_load(f) or [] + desired_packages = yaml.safe_load(f) or [] uuid_to_versions = defaultdict(list) for pkg in desired_packages: - uuid_to_versions[pkg["uuid"]].append(pkg["version"]) + uuid_to_versions[pkg["uuid"]].append(pkg["version"]) with open(dependencies_path, "r") as f: - uuid_to_store_path = yaml.safe_load(f) + uuid_to_store_path = yaml.safe_load(f) os.makedirs(out_path) full_registry = toml.load(registry_path / "Registry.toml") registry = full_registry.copy() -registry["packages"] = {k: v for k, v in registry["packages"].items() if k in uuid_to_versions} - -for (uuid, versions) in uuid_to_versions.items(): - if uuid in package_overrides: - info = package_overrides[uuid] - - # Make a registry entry based on the info from the package override - path = Path(info["name"][0].upper()) / Path(info["name"]) - registry["packages"][uuid] = { - "name": info["name"], - "path": str(path), - } - - os.makedirs(out_path / path) - - # Read the Project.yaml from the src - project = toml.load(Path(info["src"]) / "Project.toml") - - # Generate all the registry files - with open(out_path / path / Path("Compat.toml"), "w") as f: - f.write('["%s"]\n' % info["version"]) - # Write nothing in Compat.toml, because we've already resolved everything - with open(out_path / path / Path("Deps.toml"), "w") as f: - f.write('["%s"]\n' % info["version"]) - if "deps" in project: - toml.dump(project["deps"], f) - with open(out_path / path / Path("Versions.toml"), "w") as f: - f.write('["%s"]\n' % info["version"]) - f.write('git-tree-sha1 = "%s"\n' % info["treehash"]) - with open(out_path / path / Path("Package.toml"), "w") as f: - toml.dump({ - "name": info["name"], - "uuid": uuid, - "repo": "file://" + info["src"], - }, f) - - elif uuid in registry["packages"]: - registry_info = registry["packages"][uuid] - name = registry_info["name"] - path = registry_info["path"] - - os.makedirs(out_path / path) - - # Copy some files to the minimal repo unchanged - for f in ["Compat.toml", "Deps.toml", "WeakCompat.toml", "WeakDeps.toml"]: - if (registry_path / path / f).exists(): - shutil.copy2(registry_path / path / f, out_path / path) - - # Copy the Versions.toml file, trimming down to the versions we care about. - # In the case where versions=None, this is a weak dep, and we keep all versions. 
- all_versions = toml.load(registry_path / path / "Versions.toml") - versions_to_keep = {k: v for k, v in all_versions.items() if k in versions} if versions != None else all_versions - for k, v in versions_to_keep.items(): - del v["nix-sha256"] - with open(out_path / path / "Versions.toml", "w") as f: - toml.dump(versions_to_keep, f) - - if versions is None: - # This is a weak dep; just grab the whole Package.toml - shutil.copy2(registry_path / path / "Package.toml", out_path / path / "Package.toml") - elif uuid in uuid_to_store_path: - # Fill in the local store path for the repo - package_toml = toml.load(registry_path / path / "Package.toml") - package_toml["repo"] = "file://" + uuid_to_store_path[uuid] - with open(out_path / path / "Package.toml", "w") as f: - toml.dump(package_toml, f) +registry["packages"] = { + k: v for k, v in registry["packages"].items() if k in uuid_to_versions +} + +for uuid, versions in uuid_to_versions.items(): + if uuid in package_overrides: + info = package_overrides[uuid] + + # Make a registry entry based on the info from the package override + path = Path(info["name"][0].upper()) / Path(info["name"]) + registry["packages"][uuid] = { + "name": info["name"], + "path": str(path), + } + + os.makedirs(out_path / path) + + # Read the Project.yaml from the src + project = toml.load(Path(info["src"]) / "Project.toml") + + # Generate all the registry files + with open(out_path / path / Path("Compat.toml"), "w") as f: + f.write('["%s"]\n' % info["version"]) + # Write nothing in Compat.toml, because we've already resolved everything + with open(out_path / path / Path("Deps.toml"), "w") as f: + f.write('["%s"]\n' % info["version"]) + if "deps" in project: + toml.dump(project["deps"], f) + with open(out_path / path / Path("Versions.toml"), "w") as f: + f.write('["%s"]\n' % info["version"]) + f.write('git-tree-sha1 = "%s"\n' % info["treehash"]) + with open(out_path / path / Path("Package.toml"), "w") as f: + toml.dump( + { + "name": info["name"], + "uuid": uuid, + "repo": "file://" + info["src"], + }, + f, + ) + + elif uuid in registry["packages"]: + registry_info = registry["packages"][uuid] + name = registry_info["name"] + path = registry_info["path"] + + os.makedirs(out_path / path) + + # Copy some files to the minimal repo unchanged + for f in ["Compat.toml", "Deps.toml", "WeakCompat.toml", "WeakDeps.toml"]: + if (registry_path / path / f).exists(): + shutil.copy2(registry_path / path / f, out_path / path) + + # Copy the Versions.toml file, trimming down to the versions we care about. + # In the case where versions=None, this is a weak dep, and we keep all versions. + all_versions = toml.load(registry_path / path / "Versions.toml") + versions_to_keep = ( + {k: v for k, v in all_versions.items() if k in versions} + if versions != None + else all_versions + ) + for k, v in versions_to_keep.items(): + del v["nix-sha256"] + with open(out_path / path / "Versions.toml", "w") as f: + toml.dump(versions_to_keep, f) + + if versions is None: + # This is a weak dep; just grab the whole Package.toml + shutil.copy2( + registry_path / path / "Package.toml", out_path / path / "Package.toml" + ) + elif uuid in uuid_to_store_path: + # Fill in the local store path for the repo + package_toml = toml.load(registry_path / path / "Package.toml") + package_toml["repo"] = "file://" + uuid_to_store_path[uuid] + with open(out_path / path / "Package.toml", "w") as f: + toml.dump(package_toml, f) # Look for missing weak deps and include them. 
This can happen when our initial # resolve step finds dependencies, but we fail to resolve them at the project.py # stage. Usually this happens because the package that depends on them does so # as a weak dep, but doesn't have a Package.toml in its repo making this clear. for pkg in desired_packages: - for dep in (pkg.get("deps", []) or []): - uuid = dep["uuid"] - if not uuid in uuid_to_versions: - entry = full_registry["packages"].get(uuid) - if not entry: - print(f"""WARNING: found missing UUID but couldn't resolve it: {uuid}""") - continue - - # Add this entry back to the minimal Registry.toml - registry["packages"][uuid] = entry - - # Bring over the Package.toml - path = Path(entry["path"]) - if (out_path / path / "Package.toml").exists(): - continue - Path(out_path / path).mkdir(parents=True, exist_ok=True) - shutil.copy2(registry_path / path / "Package.toml", out_path / path / "Package.toml") + for dep in pkg.get("deps", []) or []: + uuid = dep["uuid"] + if not uuid in uuid_to_versions: + entry = full_registry["packages"].get(uuid) + if not entry: + print( + f"""WARNING: found missing UUID but couldn't resolve it: {uuid}""" + ) + continue + + # Add this entry back to the minimal Registry.toml + registry["packages"][uuid] = entry + + # Bring over the Package.toml + path = Path(entry["path"]) + if (out_path / path / "Package.toml").exists(): + continue + Path(out_path / path).mkdir(parents=True, exist_ok=True) + shutil.copy2( + registry_path / path / "Package.toml", out_path / path / "Package.toml" + ) # Finally, dump the Registry.toml with open(out_path / "Registry.toml", "w") as f: diff --git a/pkgs/development/julia-modules/python/project.py b/pkgs/development/julia-modules/python/project.py index 4a5f2ae20719c..ef79f1098cdd0 100755 --- a/pkgs/development/julia-modules/python/project.py +++ b/pkgs/development/julia-modules/python/project.py @@ -1,4 +1,3 @@ - from collections import defaultdict import json import os @@ -15,90 +14,103 @@ out_path = Path(sys.argv[5]) with open(desired_packages_path, "r") as f: - desired_packages = yaml.safe_load(f) or [] + desired_packages = yaml.safe_load(f) or [] with open(stdlib_infos_path, "r") as f: - stdlib_infos = yaml.safe_load(f) or [] + stdlib_infos = yaml.safe_load(f) or [] with open(dependencies_path, "r") as f: - uuid_to_store_path = yaml.safe_load(f) + uuid_to_store_path = yaml.safe_load(f) -result = { - "deps": defaultdict(list) -} +result = {"deps": defaultdict(list)} for pkg in desired_packages: - if pkg["uuid"] in package_overrides: - info = package_overrides[pkg["uuid"]] - result["deps"][info["name"]].append({ - "uuid": pkg["uuid"], - "path": info["src"], - }) - continue - - path = uuid_to_store_path.get(pkg["uuid"], None) - isStdLib = False - if pkg["uuid"] in stdlib_infos["stdlibs"]: - path = stdlib_infos["stdlib_root"] + "/" + stdlib_infos["stdlibs"][pkg["uuid"]]["name"] - isStdLib = True - - if path: - if (Path(path) / "Project.toml").exists(): - project_toml = toml.load(Path(path) / "Project.toml") - - deps = [] - weak_deps = project_toml.get("weakdeps", {}) - extensions = project_toml.get("extensions", {}) - - if "deps" in project_toml: - # Build up deps for the manifest, excluding weak deps - weak_deps_uuids = weak_deps.values() - for (dep_name, dep_uuid) in project_toml["deps"].items(): - if not (dep_uuid in weak_deps_uuids): - deps.append(dep_name) + if pkg["uuid"] in package_overrides: + info = package_overrides[pkg["uuid"]] + result["deps"][info["name"]].append( + { + "uuid": pkg["uuid"], + "path": info["src"], + } + ) + 
continue + + path = uuid_to_store_path.get(pkg["uuid"], None) + isStdLib = False + if pkg["uuid"] in stdlib_infos["stdlibs"]: + path = ( + stdlib_infos["stdlib_root"] + + "/" + + stdlib_infos["stdlibs"][pkg["uuid"]]["name"] + ) + isStdLib = True + + if path: + if (Path(path) / "Project.toml").exists(): + project_toml = toml.load(Path(path) / "Project.toml") + + deps = [] + weak_deps = project_toml.get("weakdeps", {}) + extensions = project_toml.get("extensions", {}) + + if "deps" in project_toml: + # Build up deps for the manifest, excluding weak deps + weak_deps_uuids = weak_deps.values() + for dep_name, dep_uuid in project_toml["deps"].items(): + if not (dep_uuid in weak_deps_uuids): + deps.append(dep_name) + else: + # Not all projects have a Project.toml. In this case, use the deps we + # calculated from the package resolve step. This isn't perfect since it + # will fail to properly split out weak deps, but it's better than nothing. + print( + f"""WARNING: package {pkg["name"]} didn't have a Project.toml in {path}""" + ) + deps = [x["name"] for x in pkg.get("deps", [])] + weak_deps = {} + extensions = {} + + tree_hash = pkg.get("tree_hash", "") + + result["deps"][pkg["name"]].append( + { + "version": pkg["version"], + "uuid": pkg["uuid"], + "git-tree-sha1": (tree_hash if tree_hash != "nothing" else None) + or None, + "deps": deps or None, + "weakdeps": weak_deps or None, + "extensions": extensions or None, + # We *don't* set "path" here, because then Julia will try to use the + # read-only Nix store path instead of cloning to the depot. This will + # cause packages like Conda.jl to fail during the Pkg.build() step. + # + # "path": None if isStdLib else path , + } + ) else: - # Not all projects have a Project.toml. In this case, use the deps we - # calculated from the package resolve step. This isn't perfect since it - # will fail to properly split out weak deps, but it's better than nothing. - print(f"""WARNING: package {pkg["name"]} didn't have a Project.toml in {path}""") - deps = [x["name"] for x in pkg.get("deps", [])] - weak_deps = {} - extensions = {} - - tree_hash = pkg.get("tree_hash", "") - - result["deps"][pkg["name"]].append({ - "version": pkg["version"], - "uuid": pkg["uuid"], - "git-tree-sha1": (tree_hash if tree_hash != "nothing" else None) or None, - "deps": deps or None, - "weakdeps": weak_deps or None, - "extensions": extensions or None, - - # We *don't* set "path" here, because then Julia will try to use the - # read-only Nix store path instead of cloning to the depot. This will - # cause packages like Conda.jl to fail during the Pkg.build() step. 
- # - # "path": None if isStdLib else path , - }) - else: - print("WARNING: adding a package that we didn't have a path for, and it doesn't seem to be a stdlib", pkg) - result["deps"][pkg["name"]].append({ - "version": pkg["version"], - "uuid": pkg["uuid"], - "deps": [x["name"] for x in pkg["deps"]] - }) + print( + "WARNING: adding a package that we didn't have a path for, and it doesn't seem to be a stdlib", + pkg, + ) + result["deps"][pkg["name"]].append( + { + "version": pkg["version"], + "uuid": pkg["uuid"], + "deps": [x["name"] for x in pkg["deps"]], + } + ) os.makedirs(out_path) with open(out_path / "Manifest.toml", "w") as f: - f.write(f'julia_version = "{stdlib_infos["julia_version"]}"\n') - f.write('manifest_format = "2.0"\n\n') - toml.dump(result, f) + f.write(f'julia_version = "{stdlib_infos["julia_version"]}"\n') + f.write('manifest_format = "2.0"\n\n') + toml.dump(result, f) with open(out_path / "Project.toml", "w") as f: - f.write('[deps]\n') + f.write("[deps]\n") - for pkg in desired_packages: - if pkg.get("is_input", False): - f.write(f'''{pkg["name"]} = "{pkg["uuid"]}"\n''') + for pkg in desired_packages: + if pkg.get("is_input", False): + f.write(f'''{pkg["name"]} = "{pkg["uuid"]}"\n''') diff --git a/pkgs/development/julia-modules/python/sources_nix.py b/pkgs/development/julia-modules/python/sources_nix.py index b0f0a21e3b226..f94f142701cdc 100755 --- a/pkgs/development/julia-modules/python/sources_nix.py +++ b/pkgs/development/julia-modules/python/sources_nix.py @@ -1,4 +1,3 @@ - import json from pathlib import Path import re @@ -15,49 +14,55 @@ out_path = Path(sys.argv[4]) with open(desired_packages_path, "r") as f: - desired_packages = yaml.safe_load(f) or [] + desired_packages = yaml.safe_load(f) or [] registry = toml.load(registry_path / "Registry.toml") + def ensure_version_valid(version): - """ - Ensure a version string is a valid Julia-parsable version. - It doesn't really matter what it looks like as it's just used for overrides. - """ - return re.sub('[^0-9.]','', version) + """ + Ensure a version string is a valid Julia-parsable version. + It doesn't really matter what it looks like as it's just used for overrides. + """ + return re.sub("[^0-9.]", "", version) + with open(out_path, "w") as f: - f.write("{fetchgit}:\n") - f.write("{\n") - for pkg in desired_packages: - uuid = pkg["uuid"] + f.write("{fetchgit}:\n") + f.write("{\n") + for pkg in desired_packages: + uuid = pkg["uuid"] - if pkg["name"] in package_overrides: - treehash = util.get_commit_info(package_overrides[pkg["name"]])["tree"] - f.write(f""" "{uuid}" = {{ + if pkg["name"] in package_overrides: + treehash = util.get_commit_info(package_overrides[pkg["name"]])["tree"] + f.write(f""" "{uuid}" = {{ src = null; # Overridden: will fill in later name = "{pkg["name"]}"; version = "{ensure_version_valid(pkg["version"])}"; treehash = "{treehash}"; }};\n""") - elif uuid in registry["packages"]: - # The treehash is missing for stdlib packages. Don't bother downloading these. - if (not ("tree_hash" in pkg)) or pkg["tree_hash"] == "nothing": continue + elif uuid in registry["packages"]: + # The treehash is missing for stdlib packages. Don't bother downloading these. 
+ if (not ("tree_hash" in pkg)) or pkg["tree_hash"] == "nothing": + continue - registry_info = registry["packages"][uuid] - path = registry_info["path"] - packageToml = toml.load(registry_path / path / "Package.toml") + registry_info = registry["packages"][uuid] + path = registry_info["path"] + packageToml = toml.load(registry_path / path / "Package.toml") - versions_toml = registry_path / path / "Versions.toml" - all_versions = toml.load(versions_toml) - if not pkg["version"] in all_versions: continue - version_to_use = all_versions[pkg["version"]] + versions_toml = registry_path / path / "Versions.toml" + all_versions = toml.load(versions_toml) + if not pkg["version"] in all_versions: + continue + version_to_use = all_versions[pkg["version"]] - if not "nix-sha256" in version_to_use: - raise KeyError(f"""Couldn't find nix-sha256 hash for {pkg["name"]} {pkg["version"]} in {versions_toml}. This might indicate that we failed to prefetch the hash when computing the augmented registry. Was there a relevant failure in {registry_path / "failures.yml"}?""") + if not "nix-sha256" in version_to_use: + raise KeyError( + f"""Couldn't find nix-sha256 hash for {pkg["name"]} {pkg["version"]} in {versions_toml}. This might indicate that we failed to prefetch the hash when computing the augmented registry. Was there a relevant failure in {registry_path / "failures.yml"}?""" + ) - repo = packageToml["repo"] - f.write(f""" "{uuid}" = {{ + repo = packageToml["repo"] + f.write(f""" "{uuid}" = {{ src = fetchgit {{ url = "{repo}"; rev = "{version_to_use["git-tree-sha1"]}"; @@ -67,9 +72,9 @@ def ensure_version_valid(version): version = "{pkg["version"]}"; treehash = "{version_to_use["git-tree-sha1"]}"; }};\n""") - else: - # This is probably a stdlib - # print("WARNING: couldn't figure out what to do with pkg in sources_nix.py", pkg) - pass + else: + # This is probably a stdlib + # print("WARNING: couldn't figure out what to do with pkg in sources_nix.py", pkg) + pass - f.write("}") + f.write("}") diff --git a/pkgs/development/julia-modules/python/util.py b/pkgs/development/julia-modules/python/util.py index 34aaab970e594..6a51f5d94cc69 100644 --- a/pkgs/development/julia-modules/python/util.py +++ b/pkgs/development/julia-modules/python/util.py @@ -1,12 +1,21 @@ - import os import subprocess import tempfile + def get_commit_info(repo): - with tempfile.TemporaryDirectory() as home_dir: - env_with_home = os.environ.copy() - env_with_home["HOME"] = home_dir - subprocess.check_output(["git", "config", "--global", "--add", "safe.directory", repo], env=env_with_home) - lines = subprocess.check_output(["git", "log", "--pretty=raw"], cwd=repo, env=env_with_home).decode().split("\n") - return dict([x.split() for x in lines if len(x.split()) == 2]) + with tempfile.TemporaryDirectory() as home_dir: + env_with_home = os.environ.copy() + env_with_home["HOME"] = home_dir + subprocess.check_output( + ["git", "config", "--global", "--add", "safe.directory", repo], + env=env_with_home, + ) + lines = ( + subprocess.check_output( + ["git", "log", "--pretty=raw"], cwd=repo, env=env_with_home + ) + .decode() + .split("\n") + ) + return dict([x.split() for x in lines if len(x.split()) == 2]) diff --git a/pkgs/development/julia-modules/tests/process_top_n.py b/pkgs/development/julia-modules/tests/process_top_n.py index 90de70ccec4d4..5e554a90c2bec 100755 --- a/pkgs/development/julia-modules/tests/process_top_n.py +++ b/pkgs/development/julia-modules/tests/process_top_n.py @@ -13,11 +13,11 @@ # Generate list of tuples (UUID, count) 
rows = [] with open(requests_csv_path) as f: - reader = csv.reader(f) - for row in reader: - if row[2] == "user": - # Get UUID and request_count - rows.append((row[0], int(row[4]))) + reader = csv.reader(f) + for row in reader: + if row[2] == "user": + # Get UUID and request_count + rows.append((row[0], int(row[4]))) rows.sort(key=(lambda x: x[1]), reverse=True) # Build a map from UUID -> name @@ -25,9 +25,10 @@ uuid_to_name = {k: v["name"] for k, v in registry["packages"].items()} results = [] -for (uuid, count) in rows: - name = uuid_to_name.get(uuid) - if not name: continue - results.append({ "uuid": uuid, "name": uuid_to_name.get(uuid), "count": count }) +for uuid, count in rows: + name = uuid_to_name.get(uuid) + if not name: + continue + results.append({"uuid": uuid, "name": uuid_to_name.get(uuid), "count": count}) yaml.dump(results, sys.stdout, default_flow_style=False) diff --git a/pkgs/development/libraries/libxcrypt/check_passthru_matches.py b/pkgs/development/libraries/libxcrypt/check_passthru_matches.py index ebe728e9a69b3..8d82f3e498e42 100644 --- a/pkgs/development/libraries/libxcrypt/check_passthru_matches.py +++ b/pkgs/development/libraries/libxcrypt/check_passthru_matches.py @@ -58,12 +58,12 @@ def main() -> None: f"${scheme}$" for scheme in enabled_crypt_scheme_ids ) - assert ( - len(expected_supported_formats - passthru_supported_schemes) == 0 - ), f"libxcrypt package enables the following crypt schemes that are not listed in passthru.enabledCryptSchemeIds: {expected_supported_formats - passthru_supported_schemes}" - assert ( - len(passthru_supported_schemes - expected_supported_formats) == 0 - ), f"libxcrypt package lists the following crypt schemes in passthru.enabledCryptSchemeIds that are not supported: {passthru_supported_schemes - expected_supported_formats}" + assert len(expected_supported_formats - passthru_supported_schemes) == 0, ( + f"libxcrypt package enables the following crypt schemes that are not listed in passthru.enabledCryptSchemeIds: {expected_supported_formats - passthru_supported_schemes}" + ) + assert len(passthru_supported_schemes - expected_supported_formats) == 0, ( + f"libxcrypt package lists the following crypt schemes in passthru.enabledCryptSchemeIds that are not supported: {passthru_supported_schemes - expected_supported_formats}" + ) if __name__ == "__main__": diff --git a/pkgs/development/libraries/mesa/update-wraps.py b/pkgs/development/libraries/mesa/update-wraps.py index 037dfc0397b24..afb601dbd48fa 100644 --- a/pkgs/development/libraries/mesa/update-wraps.py +++ b/pkgs/development/libraries/mesa/update-wraps.py @@ -34,11 +34,13 @@ def main(dir: str): hash = to_sri(parser.get("wrap-file", "source_hash")) - result.append({ - "pname": name, - "version": version, - "hash": hash, - }) + result.append( + { + "pname": name, + "version": version, + "hash": hash, + } + ) here = pathlib.Path(__file__).parent with (here / "wraps.json").open("w") as fd: @@ -46,5 +48,5 @@ def main(dir: str): _ = fd.write("\n") -if __name__ == '__main__': +if __name__ == "__main__": main(*sys.argv[1:]) diff --git a/pkgs/development/node-packages/remove-attr.py b/pkgs/development/node-packages/remove-attr.py index 2b13dcd5ee872..dcf7a51c28476 100755 --- a/pkgs/development/node-packages/remove-attr.py +++ b/pkgs/development/node-packages/remove-attr.py @@ -10,11 +10,16 @@ def remove(attr): - with open(os.path.join(os.path.dirname(__file__), 'node-packages.json'), 'r+') as node_packages_json: + with open( + os.path.join(os.path.dirname(__file__), "node-packages.json"), 
"r+" + ) as node_packages_json: packages = json.load(node_packages_json) idx = 0 while idx < len(packages): - if packages[idx] == attr or (isinstance(packages[idx], collections.abc.Mapping) and next(iter(packages[idx].keys())) == attr): + if packages[idx] == attr or ( + isinstance(packages[idx], collections.abc.Mapping) + and next(iter(packages[idx].keys())) == attr + ): del packages[idx] else: idx += 1 @@ -22,39 +27,48 @@ def remove(attr): node_packages_json.seek(0) for idx, package in enumerate(packages): if idx == 0: - node_packages_json.write('[\n ') + node_packages_json.write("[\n ") else: - node_packages_json.write(', ') + node_packages_json.write(", ") json.dump(package, node_packages_json) - node_packages_json.write('\n') - node_packages_json.write(']\n') + node_packages_json.write("\n") + node_packages_json.write("]\n") node_packages_json.truncate() - with fileinput.input(os.path.join(os.path.dirname(__file__), 'node-packages.nix'), inplace=1) as node_packages: + with fileinput.input( + os.path.join(os.path.dirname(__file__), "node-packages.nix"), inplace=1 + ) as node_packages: safe_attr = re.escape(attr) in_attr = False for line in node_packages: if in_attr: - if re.fullmatch(r' \};\n', line): + if re.fullmatch(r" \};\n", line): in_attr = False else: - if re.fullmatch(rf' (?:{safe_attr}|"{safe_attr}") = nodeEnv\.buildNodePackage \{{\n', line): + if re.fullmatch( + rf' (?:{safe_attr}|"{safe_attr}") = nodeEnv\.buildNodePackage \{{\n', + line, + ): in_attr = True else: sys.stdout.write(line) - with fileinput.input(os.path.join(os.path.dirname(__file__), 'main-programs.nix'), inplace=1) as main_programs: + with fileinput.input( + os.path.join(os.path.dirname(__file__), "main-programs.nix"), inplace=1 + ) as main_programs: safe_attr = re.escape(attr) for line in main_programs: if not re.fullmatch(rf' "?{safe_attr}"? 
= ".*";\n', line): sys.stdout.write(line) - with fileinput.input(os.path.join(os.path.dirname(__file__), 'overrides.nix'), inplace=1) as overrides: + with fileinput.input( + os.path.join(os.path.dirname(__file__), "overrides.nix"), inplace=1 + ) as overrides: safe_attr = re.escape(attr) in_attr = False for line in overrides: if in_attr: - if re.fullmatch(r' \}\)?;\n', line): + if re.fullmatch(r" \}\)?;\n", line): in_attr = False else: if re.fullmatch(rf' (?:{safe_attr}|"{safe_attr}") = .* \{{\n', line): @@ -63,11 +77,13 @@ def remove(attr): sys.stdout.write(line) -if __name__ == '__main__': +if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description='Remove a given package from the node-packages.nix file') - parser.add_argument('attr', help='The package attribute to remove') + parser = argparse.ArgumentParser( + description="Remove a given package from the node-packages.nix file" + ) + parser.add_argument("attr", help="The package attribute to remove") args = parser.parse_args() remove(args.attr) diff --git a/pkgs/development/python-modules/cmake/stub/cmake/__init__.py b/pkgs/development/python-modules/cmake/stub/cmake/__init__.py index 512a13f3d94ab..e485b9e691cde 100644 --- a/pkgs/development/python-modules/cmake/stub/cmake/__init__.py +++ b/pkgs/development/python-modules/cmake/stub/cmake/__init__.py @@ -2,18 +2,22 @@ import subprocess import sys -__version__ = '@version@' +__version__ = "@version@" + +CMAKE_BIN_DIR = "@CMAKE_BIN_DIR@" -CMAKE_BIN_DIR = '@CMAKE_BIN_DIR@' def _program(name, args): return subprocess.call([os.path.join(CMAKE_BIN_DIR, name)] + args, close_fds=False) + def cmake(): - raise SystemExit(_program('cmake', sys.argv[1:])) + raise SystemExit(_program("cmake", sys.argv[1:])) + def cpack(): - raise SystemExit(_program('cpack', sys.argv[1:])) + raise SystemExit(_program("cpack", sys.argv[1:])) + def ctest(): - raise SystemExit(_program('ctest', sys.argv[1:])) + raise SystemExit(_program("ctest", sys.argv[1:])) diff --git a/pkgs/development/python-modules/gradio/conftest-skip-network-errors.py b/pkgs/development/python-modules/gradio/conftest-skip-network-errors.py index a34ee1bbcd873..d728626eda937 100644 --- a/pkgs/development/python-modules/gradio/conftest-skip-network-errors.py +++ b/pkgs/development/python-modules/gradio/conftest-skip-network-errors.py @@ -3,10 +3,12 @@ from _pytest.runner import pytest_runtest_makereport as orig_pytest_runtest_makereport + # We use BaseException to minimize the chance it gets caught and 'pass'ed class NixNetworkAccessDeniedError(BaseException): pass + def pytest_runtest_makereport(item, call): """ Modifies test results after-the-fact. The function name is magic, see: @@ -27,20 +29,23 @@ def iterate_exc_chain(exc: Exception): if call.excinfo is not None: for exc in iterate_exc_chain(call.excinfo.value): if isinstance(exc, NixNetworkAccessDeniedError): - tr.outcome, tr.wasxfail = 'skipped', "reason: Requires network access." + tr.outcome, tr.wasxfail = "skipped", "reason: Requires network access." if isinstance(exc, socket.gaierror): - tr.outcome, tr.wasxfail = 'skipped', "reason: Requires network access." + tr.outcome, tr.wasxfail = "skipped", "reason: Requires network access." if isinstance(exc, httpx.ConnectError): - tr.outcome, tr.wasxfail = 'skipped', "reason: Requires network access." + tr.outcome, tr.wasxfail = "skipped", "reason: Requires network access." if isinstance(exc, FileNotFoundError): # gradio specific - tr.outcome, tr.wasxfail = 'skipped', "reason: Pypi dist bad." 
+ tr.outcome, tr.wasxfail = "skipped", "reason: Pypi dist bad." return tr + # replace network access with exception + def deny_network_access(*a, **kw): raise NixNetworkAccessDeniedError + import httpx import requests import socket diff --git a/pkgs/development/python-modules/invisible-watermark/tests/python/decode.py b/pkgs/development/python-modules/invisible-watermark/tests/python/decode.py index c1b7e52ff6f51..f6796a175d047 100644 --- a/pkgs/development/python-modules/invisible-watermark/tests/python/decode.py +++ b/pkgs/development/python-modules/invisible-watermark/tests/python/decode.py @@ -4,16 +4,16 @@ import cv2 from imwatermark import WatermarkDecoder -input_file = os.environ['image'] -output_file_path = os.environ['out'] -num_bits = int(os.environ['num_bits']) -method = os.environ['method'] +input_file = os.environ["image"] +output_file_path = os.environ["out"] +num_bits = int(os.environ["num_bits"]) +method = os.environ["method"] bgr = cv2.imread(input_file) -decoder = WatermarkDecoder('bytes', num_bits) +decoder = WatermarkDecoder("bytes", num_bits) watermark = decoder.decode(bgr, method) -message = watermark.decode('utf-8') +message = watermark.decode("utf-8") -with open(output_file_path, 'w') as f: +with open(output_file_path, "w") as f: f.write(message) diff --git a/pkgs/development/python-modules/invisible-watermark/tests/python/encode.py b/pkgs/development/python-modules/invisible-watermark/tests/python/encode.py index 59a72cffe70d6..d3aff6173aa24 100644 --- a/pkgs/development/python-modules/invisible-watermark/tests/python/encode.py +++ b/pkgs/development/python-modules/invisible-watermark/tests/python/encode.py @@ -4,18 +4,18 @@ import cv2 from imwatermark import WatermarkEncoder -input_file_path = os.environ['image'] -output_dir = os.environ['out'] -message = os.environ['message'] -method = os.environ['method'] +input_file_path = os.environ["image"] +output_dir = os.environ["out"] +message = os.environ["message"] +method = os.environ["method"] os.mkdir(output_dir) bgr = cv2.imread(input_file_path) encoder = WatermarkEncoder() -encoder.set_watermark('bytes', message.encode('utf-8')) +encoder.set_watermark("bytes", message.encode("utf-8")) bgr_encoded = encoder.encode(bgr, method) -output_file = os.path.join(output_dir, 'test_wm.png') +output_file = os.path.join(output_dir, "test_wm.png") cv2.imwrite(output_file, bgr_encoded) diff --git a/pkgs/development/python-modules/kaleido/tests.py b/pkgs/development/python-modules/kaleido/tests.py index 0cbcaaae60bed..f2aa33a2c2734 100644 --- a/pkgs/development/python-modules/kaleido/tests.py +++ b/pkgs/development/python-modules/kaleido/tests.py @@ -4,7 +4,7 @@ out = os.environ["out"] if not os.path.exists(out): - os.makedirs(out) + os.makedirs(out) outfile = os.path.join(out, "figure.png") fig = px.scatter(px.data.iris(), x="sepal_length", y="sepal_width", color="species") diff --git a/pkgs/development/python-modules/ninja/stub/ninja/__init__.py b/pkgs/development/python-modules/ninja/stub/ninja/__init__.py index d6e7abe152059..ceaf5e87c35e9 100644 --- a/pkgs/development/python-modules/ninja/stub/ninja/__init__.py +++ b/pkgs/development/python-modules/ninja/stub/ninja/__init__.py @@ -2,12 +2,14 @@ import subprocess import sys -__version__ = '@version@' +__version__ = "@version@" + +BIN_DIR = "@BIN_DIR@" -BIN_DIR = '@BIN_DIR@' def _program(name, args): return subprocess.call([os.path.join(BIN_DIR, name)] + args, close_fds=False) + def ninja(): - raise SystemExit(_program('ninja', sys.argv[1:])) + raise 
SystemExit(_program("ninja", sys.argv[1:])) diff --git a/pkgs/development/python-modules/nixpkgs-plugin-update/nixpkgs-plugin-update/src/nixpkgs_plugin_update/__init__.py b/pkgs/development/python-modules/nixpkgs-plugin-update/nixpkgs-plugin-update/src/nixpkgs_plugin_update/__init__.py index 299d5a056a992..837ad3efa88c2 100644 --- a/pkgs/development/python-modules/nixpkgs-plugin-update/nixpkgs-plugin-update/src/nixpkgs_plugin_update/__init__.py +++ b/pkgs/development/python-modules/nixpkgs-plugin-update/nixpkgs-plugin-update/src/nixpkgs_plugin_update/__init__.py @@ -198,9 +198,9 @@ def latest_commit(self) -> tuple[str, datetime]: assert commit_link is not None, f"No link tag found feed entry {xml!r}" url = urlparse(commit_link.get("href")) updated_tag = latest_entry.find(ATOM_UPDATED) - assert ( - updated_tag is not None and updated_tag.text is not None - ), f"No updated tag found feed entry {xml!r}" + assert updated_tag is not None and updated_tag.text is not None, ( + f"No updated tag found feed entry {xml!r}" + ) updated = datetime.strptime(updated_tag.text, "%Y-%m-%dT%H:%M:%SZ") return Path(str(url.path)).name, updated @@ -427,7 +427,7 @@ def get_current_plugins( raise ValueError(f"Cannot parse version: {attr['version']}") date = datetime.strptime(version.group(), "%Y-%m-%d") - pdesc = PluginDesc.load_from_string(config, f'{attr["homePage"]} as {name}') + pdesc = PluginDesc.load_from_string(config, f"{attr['homePage']} as {name}") p = Plugin( attr["pname"], checksum["rev"], @@ -554,7 +554,7 @@ def merge_results( for plugin_desc, plugin, redirect in fetched: # Check if plugin is a Plugin object and has normalized_name attribute - if isinstance(plugin, Plugin) and hasattr(plugin, 'normalized_name'): + if isinstance(plugin, Plugin) and hasattr(plugin, "normalized_name"): result[plugin.normalized_name] = (plugin_desc, plugin, redirect) elif isinstance(plugin, Exception): # For exceptions, we can't determine the normalized_name @@ -562,7 +562,9 @@ def merge_results( log.error(f"Error fetching plugin {plugin_desc.name}: {plugin!r}") else: # For unexpected types, log the issue - log.error(f"Unexpected plugin type for {plugin_desc.name}: {type(plugin)}") + log.error( + f"Unexpected plugin type for {plugin_desc.name}: {type(plugin)}" + ) return list(result.values()) diff --git a/pkgs/development/python-modules/pytest-cov-stub/src/pytest_cov/plugin.py b/pkgs/development/python-modules/pytest-cov-stub/src/pytest_cov/plugin.py index d03f4d1f7a882..fcb0206ab9c75 100644 --- a/pkgs/development/python-modules/pytest-cov-stub/src/pytest_cov/plugin.py +++ b/pkgs/development/python-modules/pytest-cov-stub/src/pytest_cov/plugin.py @@ -1,93 +1,102 @@ import argparse import pytest + class CoverageError(Exception): pass + class PytestCovWarning(pytest.PytestWarning): pass + class CovDisabledWarning(PytestCovWarning): pass + class CovReportWarning(PytestCovWarning): pass + class StoreReport(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): report_type, file = values namespace.cov_report[report_type] = file + def pytest_addoption(parser): - group = parser.getgroup('cov', 'coverage reporting') + group = parser.getgroup("cov", "coverage reporting") group.addoption( - '--cov', - action='append', + "--cov", + action="append", default=[], - metavar='SOURCE', - nargs='?', + metavar="SOURCE", + nargs="?", const=True, - dest='cov_source', + dest="cov_source", ) group.addoption( - '--cov-reset', - action='store_const', + "--cov-reset", + action="store_const", const=[], - 
dest='cov_source', + dest="cov_source", ) group.addoption( - '--cov-report', + "--cov-report", action=StoreReport, default={}, - metavar='TYPE', + metavar="TYPE", type=lambda x: x.split(":", 1) if ":" in x else (x, None), ) group.addoption( - '--cov-config', - action='store', - default='.coveragerc', - metavar='PATH', + "--cov-config", + action="store", + default=".coveragerc", + metavar="PATH", ) group.addoption( - '--no-cov-on-fail', - action='store_true', + "--no-cov-on-fail", + action="store_true", default=False, ) group.addoption( - '--no-cov', - action='store_true', + "--no-cov", + action="store_true", default=False, ) group.addoption( - '--cov-fail-under', - action='store', - metavar='MIN', + "--cov-fail-under", + action="store", + metavar="MIN", type=str, ) group.addoption( - '--cov-append', - action='store_true', + "--cov-append", + action="store_true", default=False, ) group.addoption( - '--cov-branch', - action='store_true', + "--cov-branch", + action="store_true", default=None, ) group.addoption( - '--cov-context', - action='store', - metavar='CONTEXT', + "--cov-context", + action="store", + metavar="CONTEXT", type=str, ) + def pytest_configure(config): - config.addinivalue_line('markers', 'no_cover: disable coverage for this test.') + config.addinivalue_line("markers", "no_cover: disable coverage for this test.") + @pytest.fixture def no_cover(): pass + @pytest.fixture def cov(): pass diff --git a/pkgs/development/python-modules/recursive-pth-loader/sitecustomize.py b/pkgs/development/python-modules/recursive-pth-loader/sitecustomize.py index 057e779803cbd..d5283c633813f 100644 --- a/pkgs/development/python-modules/recursive-pth-loader/sitecustomize.py +++ b/pkgs/development/python-modules/recursive-pth-loader/sitecustomize.py @@ -14,7 +14,7 @@ for path_idx, sitedir in enumerate(sys.path): # ignore non-site-packages - if not sitedir.endswith('site-packages'): + if not sitedir.endswith("site-packages"): continue # find pth files @@ -43,4 +43,4 @@ line = line.rstrip() dir, dircase = site.makepath(sitedir, line) if not dircase in sys.path: - sys.path.insert(path_idx+1, dir) + sys.path.insert(path_idx + 1, dir) diff --git a/pkgs/development/python-modules/spacy-transformers/annotation-test/annotate.py b/pkgs/development/python-modules/spacy-transformers/annotation-test/annotate.py index 96854b6cae8ec..43fc3f3b5ee13 100644 --- a/pkgs/development/python-modules/spacy-transformers/annotation-test/annotate.py +++ b/pkgs/development/python-modules/spacy-transformers/annotation-test/annotate.py @@ -7,7 +7,8 @@ "seriously. “I can tell you very senior CEOs of major American " "car companies would shake my hand and turn away because I wasn’t " "worth talking to,” said Thrun, in an interview with Recode earlier " - "this week.") + "this week." 
+) @pytest.fixture @@ -21,49 +22,48 @@ def doc_en_core_web_trf(en_core_web_trf): def test_entities(doc_en_core_web_trf): - entities = list(map(lambda e: (e.text, e.label_), - doc_en_core_web_trf.ents)) + entities = list(map(lambda e: (e.text, e.label_), doc_en_core_web_trf.ents)) assert entities == [ - ('Sebastian Thrun', 'PERSON'), - ('Google', 'ORG'), - ('2007', 'DATE'), - ('American', 'NORP'), - ('Thrun', 'PERSON'), - ('Recode', 'ORG'), - ('earlier this week', 'DATE'), + ("Sebastian Thrun", "PERSON"), + ("Google", "ORG"), + ("2007", "DATE"), + ("American", "NORP"), + ("Thrun", "PERSON"), + ("Recode", "ORG"), + ("earlier this week", "DATE"), ] def test_nouns(doc_en_core_web_trf): - assert [ - chunk.text for chunk in doc_en_core_web_trf.noun_chunks] == [ - 'Sebastian Thrun', - 'self-driving cars', - 'Google', - 'few people', - 'the company', - 'him', - 'I', - 'you', - 'very senior CEOs', - 'major American car companies', - 'my hand', - 'I', - 'Thrun', - 'an interview', - 'Recode'] + assert [chunk.text for chunk in doc_en_core_web_trf.noun_chunks] == [ + "Sebastian Thrun", + "self-driving cars", + "Google", + "few people", + "the company", + "him", + "I", + "you", + "very senior CEOs", + "major American car companies", + "my hand", + "I", + "Thrun", + "an interview", + "Recode", + ] def test_verbs(doc_en_core_web_trf): - assert [ - token.lemma_ for token in doc_en_core_web_trf if token.pos_ == "VERB"] == [ - 'start', - 'work', - 'drive', - 'take', - 'tell', - 'shake', - 'turn', - 'talk', - 'say'] + assert [token.lemma_ for token in doc_en_core_web_trf if token.pos_ == "VERB"] == [ + "start", + "work", + "drive", + "take", + "tell", + "shake", + "turn", + "talk", + "say", + ] diff --git a/pkgs/development/python-modules/spacy/annotation-test/annotate.py b/pkgs/development/python-modules/spacy/annotation-test/annotate.py index db5fac2f7f6cf..f998f44abf66b 100644 --- a/pkgs/development/python-modules/spacy/annotation-test/annotate.py +++ b/pkgs/development/python-modules/spacy/annotation-test/annotate.py @@ -7,7 +7,8 @@ "seriously. “I can tell you very senior CEOs of major American " "car companies would shake my hand and turn away because I wasn’t " "worth talking to,” said Thrun, in an interview with Recode earlier " - "this week.") + "this week." 
+) @pytest.fixture @@ -21,49 +22,48 @@ def doc_en_core_web_sm(en_core_web_sm): def test_entities(doc_en_core_web_sm): - entities = list(map(lambda e: (e.text, e.label_), - doc_en_core_web_sm.ents)) + entities = list(map(lambda e: (e.text, e.label_), doc_en_core_web_sm.ents)) assert entities == [ - ('Sebastian Thrun', 'PERSON'), - ('Google', 'ORG'), - ('2007', 'DATE'), - ('American', 'NORP'), - ('Thrun', 'GPE'), - ('Recode', 'ORG'), - ('earlier this week', 'DATE'), + ("Sebastian Thrun", "PERSON"), + ("Google", "ORG"), + ("2007", "DATE"), + ("American", "NORP"), + ("Thrun", "GPE"), + ("Recode", "ORG"), + ("earlier this week", "DATE"), ] def test_nouns(doc_en_core_web_sm): - assert [ - chunk.text for chunk in doc_en_core_web_sm.noun_chunks] == [ - 'Sebastian Thrun', - 'self-driving cars', - 'Google', - 'few people', - 'the company', - 'him', - 'I', - 'you', - 'very senior CEOs', - 'major American car companies', - 'my hand', - 'I', - 'Thrun', - 'an interview', - 'Recode'] + assert [chunk.text for chunk in doc_en_core_web_sm.noun_chunks] == [ + "Sebastian Thrun", + "self-driving cars", + "Google", + "few people", + "the company", + "him", + "I", + "you", + "very senior CEOs", + "major American car companies", + "my hand", + "I", + "Thrun", + "an interview", + "Recode", + ] def test_verbs(doc_en_core_web_sm): - assert [ - token.lemma_ for token in doc_en_core_web_sm if token.pos_ == "VERB"] == [ - 'start', - 'work', - 'drive', - 'take', - 'tell', - 'shake', - 'turn', - 'talk', - 'say'] + assert [token.lemma_ for token in doc_en_core_web_sm if token.pos_ == "VERB"] == [ + "start", + "work", + "drive", + "take", + "tell", + "shake", + "turn", + "talk", + "say", + ] diff --git a/pkgs/development/python-modules/waitress-django/src/setup.py b/pkgs/development/python-modules/waitress-django/src/setup.py index 65f51237b589d..0efe0dc6406aa 100644 --- a/pkgs/development/python-modules/waitress-django/src/setup.py +++ b/pkgs/development/python-modules/waitress-django/src/setup.py @@ -2,10 +2,11 @@ from distutils.core import setup -setup( name = "waitress-django" - , version = "1.0.0" - , description = "A waitress WSGI server serving django" - , author = "Bas van Dijk" - , author_email = "v.dijk.bas@gmail.com" - , scripts = ["waitress-serve-django"] - ) +setup( + name="waitress-django", + version="1.0.0", + description="A waitress WSGI server serving django", + author="Bas van Dijk", + author_email="v.dijk.bas@gmail.com", + scripts=["waitress-serve-django"], +) diff --git a/pkgs/development/tools/build-managers/gradle/compress-deps-json.py b/pkgs/development/tools/build-managers/gradle/compress-deps-json.py index 6397892835adf..40d2537f9e49d 100644 --- a/pkgs/development/tools/build-managers/gradle/compress-deps-json.py +++ b/pkgs/development/tools/build-managers/gradle/compress-deps-json.py @@ -155,7 +155,9 @@ new_data[part1] = info1 data = new_data -new_data["!comment"] = "This is a nixpkgs Gradle dependency lockfile. For more details, refer to the Gradle section in the nixpkgs manual." # type: ignore +new_data["!comment"] = ( + "This is a nixpkgs Gradle dependency lockfile. For more details, refer to the Gradle section in the nixpkgs manual." 
# type: ignore +) new_data["!version"] = 1 # type: ignore with open(sys.argv[2], "wt") as f: diff --git a/pkgs/development/tools/electron/binary/update.py b/pkgs/development/tools/electron/binary/update.py index 67efba73c3624..99779606bd2fb 100755 --- a/pkgs/development/tools/electron/binary/update.py +++ b/pkgs/development/tools/electron/binary/update.py @@ -19,6 +19,7 @@ The `update` and `update-all` commands accept an optional `--commit` flag to automatically commit the changes for you. """ + import logging import os import subprocess @@ -27,6 +28,7 @@ import click_log from typing import Tuple + os.chdir(os.path.dirname(__file__)) sys.path.append("..") from update_util import * @@ -51,6 +53,7 @@ "aarch64-darwin": "darwin-arm64", } + def get_shasums256(version: str) -> list: """Returns the contents of SHASUMS256.txt""" try: @@ -150,11 +153,11 @@ def update_binary(major_version: str, commit: bool, chromedriver: bool) -> None: commit: Whether the updater should commit the result """ if chromedriver: - json_path=CHROMEDRIVER_INFO_JSON + json_path = CHROMEDRIVER_INFO_JSON package_name = f"electron-chromedriver_{major_version}" - update_fn=get_chromedriver_hashes + update_fn = get_chromedriver_hashes else: - json_path=BINARY_INFO_JSON + json_path = BINARY_INFO_JSON package_name = f"electron_{major_version}-bin" update_fn = get_electron_hashes print(f"Updating {package_name}") @@ -190,7 +193,9 @@ def update_chromedriver(version: str, commit: bool) -> None: @cli.command("update", help="Update a single major release") -@click.option("-v", "--version", required=True, type=str, help="The major version, e.g. '23'") +@click.option( + "-v", "--version", required=True, type=str, help="The major version, e.g. '23'" +) @click.option("-c", "--commit", is_flag=True, default=False, help="Commit the result") def update(version: str, commit: bool) -> None: update_binary(version, commit, False) diff --git a/pkgs/development/tools/electron/update.py b/pkgs/development/tools/electron/update.py index 2ae0c2e4664dd..d90cd2ac6549d 100755 --- a/pkgs/development/tools/electron/update.py +++ b/pkgs/development/tools/electron/update.py @@ -20,6 +20,7 @@ flag to automatically commit the changes for you, and `--force` to skip the up-to-date version check. 
""" + import base64 import json import logging @@ -46,7 +47,11 @@ os.chdir(os.path.dirname(__file__)) # Absolute path of nixpkgs top-level directory -NIXPKGS_PATH = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("utf-8").strip() +NIXPKGS_PATH = ( + subprocess.check_output(["git", "rev-parse", "--show-toplevel"]) + .decode("utf-8") + .strip() +) memory: Memory = Memory("cache", verbose=0) @@ -56,9 +61,13 @@ def get_gclient_data(rev: str) -> any: output = subprocess.check_output( - ["gclient2nix", "generate", - f"https://github.com/electron/electron@{rev}", - "--root", "src/electron"] + [ + "gclient2nix", + "generate", + f"https://github.com/electron/electron@{rev}", + "--root", + "src/electron", + ] ) return json.loads(output) @@ -69,7 +78,7 @@ def get_chromium_file(chromium_tag: str, filepath: str) -> str: urlopen( f"https://chromium.googlesource.com/chromium/src.git/+/{chromium_tag}/{filepath}?format=TEXT" ).read() - ).decode("utf-8") + ).decode("utf-8") def get_electron_file(electron_tag: str, filepath: str) -> str: @@ -89,6 +98,7 @@ def get_gn_hash(gn_version, gn_commit): out = subprocess.check_output(["nurl", "--hash", "--expr", expr]) return out.decode("utf-8").strip() + @memory.cache def get_chromium_gn_source(chromium_tag: str) -> dict: gn_pattern = r"'gn_version': 'git_revision:([0-9a-f]{40})'" @@ -101,7 +111,9 @@ def get_chromium_gn_source(chromium_tag: str) -> dict: .split(")]}'\n")[1] ) - gn_commit_date = datetime.strptime(gn_commit_info["committer"]["time"], "%a %b %d %H:%M:%S %Y %z") + gn_commit_date = datetime.strptime( + gn_commit_info["committer"]["time"], "%a %b %d %H:%M:%S %Y %z" + ) gn_date = gn_commit_date.astimezone(UTC).date().isoformat() gn_version = f"0-unstable-{gn_date}" @@ -113,6 +125,7 @@ def get_chromium_gn_source(chromium_tag: str) -> dict: } } + @memory.cache def get_electron_yarn_hash(electron_tag: str) -> str: print(f"prefetch-yarn-deps", file=sys.stderr) @@ -125,12 +138,15 @@ def get_electron_yarn_hash(electron_tag: str) -> str: .strip() ) + @memory.cache def get_chromium_npm_hash(chromium_tag: str) -> str: print(f"prefetch-npm-deps", file=sys.stderr) with tempfile.TemporaryDirectory() as tmp_dir: with open(tmp_dir + "/package-lock.json", "w") as f: - f.write(get_chromium_file(chromium_tag, "third_party/node/package-lock.json")) + f.write( + get_chromium_file(chromium_tag, "third_party/node/package-lock.json") + ) return ( subprocess.check_output( ["prefetch-npm-deps", tmp_dir + "/package-lock.json"] @@ -141,9 +157,13 @@ def get_chromium_npm_hash(chromium_tag: str) -> str: def get_update(major_version: str, m: str, gclient_data: any) -> Tuple[str, dict]: - tasks = [] - a = lambda: (("electron_yarn_hash", get_electron_yarn_hash(gclient_data["src/electron"]["args"]["tag"]))) + a = lambda: ( + ( + "electron_yarn_hash", + get_electron_yarn_hash(gclient_data["src/electron"]["args"]["tag"]), + ) + ) tasks.append(delayed(a)()) a = lambda: ( ( @@ -194,9 +214,7 @@ def update_source(version: str, commit: bool, force: bool) -> None: old_info = load_info_json(SOURCE_INFO_JSON) old_version = ( - old_info[major_version]["version"] - if major_version in old_info - else None + old_info[major_version]["version"] if major_version in old_info else None ) m, rev = get_latest_version(major_version) @@ -222,16 +240,22 @@ def cli() -> None: @cli.command("update", help="Update a single major release") -@click.option("-v", "--version", required=True, type=str, help="The major version, e.g. 
'23'") +@click.option( + "-v", "--version", required=True, type=str, help="The major version, e.g. '23'" +) @click.option("-c", "--commit", is_flag=True, default=False, help="Commit the result") -@click.option("-f", "--force", is_flag=True, default=False, help="Skip up-to-date version check") +@click.option( + "-f", "--force", is_flag=True, default=False, help="Skip up-to-date version check" +) def update(version: str, commit: bool, force: bool) -> None: update_source(version, commit, force) @cli.command("update-all", help="Update all releases at once") @click.option("-c", "--commit", is_flag=True, default=False, help="Commit the result") -@click.option("-f", "--force", is_flag=True, default=False, help="Skip up-to-date version check") +@click.option( + "-f", "--force", is_flag=True, default=False, help="Skip up-to-date version check" +) def update_all(commit: bool, force: bool) -> None: """Update all eletron-source releases at once diff --git a/pkgs/development/tools/electron/update_util.py b/pkgs/development/tools/electron/update_util.py index 4c3f652776da2..767a1e8c2c138 100755 --- a/pkgs/development/tools/electron/update_util.py +++ b/pkgs/development/tools/electron/update_util.py @@ -13,6 +13,7 @@ releases_json = None + # Releases that have reached end-of-life no longer receive any updates # and it is rather pointless trying to update those. # @@ -39,6 +40,7 @@ def supported_version_range() -> range: 1, ) + def get_latest_version(major_version: str) -> Tuple[str, str]: """Returns the latest version for a given major version""" electron_releases: dict = json.loads( @@ -118,12 +120,12 @@ def commit_result( path: Path to the lockfile to be committed """ - assert ( - isinstance(package_name, str) and len(package_name) > 0 - ), "Argument `package_name` cannot be empty" - assert ( - isinstance(new_version, str) and len(new_version) > 0 - ), "Argument `new_version` cannot be empty" + assert isinstance(package_name, str) and len(package_name) > 0, ( + "Argument `package_name` cannot be empty" + ) + assert isinstance(new_version, str) and len(new_version) > 0, ( + "Argument `new_version` cannot be empty" + ) if old_version != new_version: major_version = new_version.split(".")[0] diff --git a/pkgs/development/tools/parsing/tree-sitter/update_impl.py b/pkgs/development/tools/parsing/tree-sitter/update_impl.py index 3133aa3902af1..3b53f7466ee74 100644 --- a/pkgs/development/tools/parsing/tree-sitter/update_impl.py +++ b/pkgs/development/tools/parsing/tree-sitter/update_impl.py @@ -26,14 +26,11 @@ def atomically_write(file_path: str, content: bytes) -> None: with NamedTemporaryFile( # write to the parent dir, so that it’s guaranteed to be on the same filesystem dir=os.path.dirname(file_path), - delete=False + delete=False, ) as tmp: try: tmp.write(content) - os.rename( - src=tmp.name, - dst=file_path - ) + os.rename(src=tmp.name, dst=file_path) except Exception: os.unlink(tmp.name) @@ -104,7 +101,7 @@ def fetchRepo() -> None: out = run_cmd( curl_github_args( token, - url=f"https://api.github.com/repos/{quote(orga)}/{quote(repo)}/releases/latest" + url=f"https://api.github.com/repos/{quote(orga)}/{quote(repo)}/releases/latest", ) ) release: str @@ -120,21 +117,20 @@ def fetchRepo() -> None: case {"tag_name": tag_name}: release = tag_name case _: - sys.exit(f"git result for {orga}/{repo} did not have a `tag_name` field") + sys.exit( + f"git result for {orga}/{repo} did not have a `tag_name` field" + ) log(f"Fetching latest release ({release}) of {orga}/{repo} …") res = 
run_cmd( nix_prefetch_git_args( url=f"https://github.com/{quote(orga)}/{quote(repo)}", - version_rev=release + version_rev=release, ) ) atomically_write( - file_path=os.path.join( - outputDir, - f"{nixRepoAttrName}.json" - ), - content=res + file_path=os.path.join(outputDir, f"{nixRepoAttrName}.json"), + content=res, ) case _: sys.exit("input json must have `orga` and `repo` keys") @@ -145,8 +141,7 @@ def fetchOrgaLatestRepos(orga: str) -> set[str]: token: str | None = os.environ.get("GITHUB_TOKEN", None) out = run_cmd( curl_github_args( - token, - url=f"https://api.github.com/orgs/{quote(orga)}/repos?per_page=100" + token, url=f"https://api.github.com/orgs/{quote(orga)}/repos?per_page=100" ) ) match curl_result(out): @@ -176,12 +171,7 @@ def checkTreeSitterRepos(latest_github_repos: set[str]) -> None: Grammar = TypedDict( "Grammar", - { - "nixRepoAttrName": str, - "orga": str, - "repo": str, - "branch": Optional[str] - } + {"nixRepoAttrName": str, "orga": str, "repo": str, "branch": Optional[str]}, ) @@ -200,11 +190,8 @@ def file() -> Iterator[str]: yield "" atomically_write( - file_path=os.path.join( - outputDir, - "default.nix" - ), - content="\n".join(file()).encode() + file_path=os.path.join(outputDir, "default.nix"), + content="\n".join(file()).encode(), ) diff --git a/pkgs/games/minecraft-servers/update.py b/pkgs/games/minecraft-servers/update.py index f272c8b71a845..e0a02c0aede94 100755 --- a/pkgs/games/minecraft-servers/update.py +++ b/pkgs/games/minecraft-servers/update.py @@ -118,8 +118,12 @@ def get_latest_major_releases(releases: List[Version]) -> Dict[str, Version]: """ return { major_release: max( - (release for release in releases if get_major_release(release.id) == major_release), - key=lambda x: tuple(map(int, x.id.split('.'))), + ( + release + for release in releases + if get_major_release(release.id) == major_release + ), + key=lambda x: tuple(map(int, x.id.split("."))), ) for major_release in group_major_releases(releases) } diff --git a/pkgs/games/papermc/update.py b/pkgs/games/papermc/update.py index 7bfc8ee3a0b73..fdf3c9e876fb4 100755 --- a/pkgs/games/papermc/update.py +++ b/pkgs/games/papermc/update.py @@ -47,16 +47,19 @@ def fetch_versions(self, not_before_minor_version: int = 18): # we only want versions that are no pre-releases release_versions = filter( - lambda v_name: all(s not in v_name for s in ["pre", "rc"]), response.json()["versions"]) + lambda v_name: all(s not in v_name for s in ["pre", "rc"]), + response.json()["versions"], + ) for version_name in release_versions: - # split version string, convert to list ot int version_split = version_name.split(".") version_split = list(map(int, version_split)) # check if version is higher than 1. 
- if (version_split[0] > 1) or (version_split[0] == 1 and version_split[1] >= not_before_minor_version): + if (version_split[0] > 1) or ( + version_split[0] == 1 and version_split[1] >= not_before_minor_version + ): self.versions.append(Version(version_name)) def fetch_latest_version_builds(self): @@ -77,7 +80,7 @@ def fetch_latest_version_builds(self): return # the highest build in response.json()['builds']: - latest_build = response.json()['builds'][-1] + latest_build = response.json()["builds"][-1] version.build_number = latest_build def generate_version_hashes(self): @@ -91,21 +94,25 @@ def generate_version_hashes(self): def versions_to_json(self): return json.dumps( - {version.name: {'hash': version.hash, 'version': version.full_name} - for version in self.versions}, - indent=4 + { + version.name: {"hash": version.hash, "version": version.full_name} + for version in self.versions + }, + indent=4, ) def find_version_json() -> str: """ Find the versions.json file in the same directory as this script """ - return os.path.join(os.path.dirname(os.path.realpath(__file__)), "versions.json") + return os.path.join( + os.path.dirname(os.path.realpath(__file__)), "versions.json" + ) def write_versions(self, file_name: str = find_version_json()): - """ write all processed versions to json """ + """write all processed versions to json""" # save json to versions.json - with open(file_name, 'w') as f: + with open(file_name, "w") as f: f.write(self.versions_to_json() + "\n") @staticmethod @@ -135,7 +142,7 @@ def download_and_generate_sha256_hash(url: str) -> str | None: hash_value = sha256_hash.digest() # Encode the hash value in base64 - base64_hash = base64.b64encode(hash_value).decode('utf-8') + base64_hash = base64.b64encode(hash_value).decode("utf-8") # Format it as "sha256-{base64_hash}" sri_representation = f"sha256-{base64_hash}" @@ -143,7 +150,7 @@ def download_and_generate_sha256_hash(url: str) -> str | None: return sri_representation -if __name__ == '__main__': +if __name__ == "__main__": version_manager = VersionManager() version_manager.fetch_versions() diff --git a/pkgs/os-specific/linux/device-tree/apply_overlays.py b/pkgs/os-specific/linux/device-tree/apply_overlays.py index 8b9f6f33dc3e1..bd9313677a916 100644 --- a/pkgs/os-specific/linux/device-tree/apply_overlays.py +++ b/pkgs/os-specific/linux/device-tree/apply_overlays.py @@ -5,7 +5,13 @@ from pathlib import Path import shutil -from libfdt import Fdt, FdtException, FDT_ERR_NOSPACE, FDT_ERR_NOTFOUND, fdt_overlay_apply +from libfdt import ( + Fdt, + FdtException, + FDT_ERR_NOSPACE, + FDT_ERR_NOTFOUND, + fdt_overlay_apply, +) @dataclass @@ -57,7 +63,10 @@ def apply_overlay(dt: Fdt, dto: Fdt) -> Fdt: raise FdtException(err) -def process_dtb(rel_path: Path, source: Path, destination: Path, overlays_data: list[Overlay]): + +def process_dtb( + rel_path: Path, source: Path, destination: Path, overlays_data: list[Overlay] +): source_dt = source / rel_path print(f"Processing source device tree {rel_path}...") with source_dt.open("rb") as fd: @@ -70,9 +79,13 @@ def process_dtb(rel_path: Path, source: Path, destination: Path, overlays_data: dt_compatible = get_compatible(dt) if len(dt_compatible) == 0: - print(f" Device tree {rel_path} has no compatible string set. Assuming it's compatible with overlay") + print( + f" Device tree {rel_path} has no compatible string set. 
Assuming it's compatible with overlay" + ) elif not overlay.compatible.intersection(dt_compatible): - print(f" Skipping overlay {overlay.name}: {overlay.compatible} is incompatible with {dt_compatible}") + print( + f" Skipping overlay {overlay.name}: {overlay.compatible} is incompatible with {dt_compatible}" + ) continue print(f" Applying overlay {overlay.name}") @@ -84,11 +97,16 @@ def process_dtb(rel_path: Path, source: Path, destination: Path, overlays_data: with dest_path.open("wb") as fd: fd.write(dt.as_bytearray()) + def main(): - parser = ArgumentParser(description='Apply a list of overlays to a directory of device trees') + parser = ArgumentParser( + description="Apply a list of overlays to a directory of device trees" + ) parser.add_argument("--source", type=Path, help="Source directory") parser.add_argument("--destination", type=Path, help="Destination directory") - parser.add_argument("--overlays", type=Path, help="JSON file with overlay descriptions") + parser.add_argument( + "--overlays", type=Path, help="JSON file with overlay descriptions" + ) args = parser.parse_args() source: Path = args.source @@ -117,5 +135,5 @@ def main(): shutil.copy(source / rel_path, dest_path) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pkgs/os-specific/linux/kernel/hardened/update.py b/pkgs/os-specific/linux/kernel/hardened/update.py index 3e12a0e2dd5de..018eb77ca1df1 100755 --- a/pkgs/os-specific/linux/kernel/hardened/update.py +++ b/pkgs/os-specific/linux/kernel/hardened/update.py @@ -33,12 +33,17 @@ Version = List[VersionComponent] -PatchData = TypedDict("PatchData", {"name": str, "url": str, "sha256": str, "extra": str}) -Patch = TypedDict("Patch", { - "patch": PatchData, - "version": str, - "sha256": str, -}) +PatchData = TypedDict( + "PatchData", {"name": str, "url": str, "sha256": str, "extra": str} +) +Patch = TypedDict( + "Patch", + { + "patch": PatchData, + "version": str, + "sha256": str, + }, +) def read_min_kernel_branch() -> List[str]: @@ -88,7 +93,11 @@ def nix_prefetch_url(url: str) -> Tuple[str, Path]: def verify_openpgp_signature( - *, name: str, trusted_key: Path, sig_path: Path, data_path: Path, + *, + name: str, + trusted_key: Path, + sig_path: Path, + data_path: Path, ) -> bool: with TemporaryDirectory(suffix=".nixpkgs-gnupg-home") as gnupg_home_str: gnupg_home = Path(gnupg_home_str) @@ -113,7 +122,7 @@ def verify_openpgp_signature( def fetch_patch(*, name: str, release_info: ReleaseInfo) -> Optional[Patch]: release = release_info.release - extra = f'-{release_info.version[-1]}' + extra = f"-{release_info.version[-1]}" def find_asset(filename: str) -> str: try: @@ -145,24 +154,30 @@ def find_asset(filename: str) -> str: if not sig_ok: return None - kernel_ver = re.sub(r"v?(.*)(-hardened[\d]+)$", r'\1', release_info.release.tag_name) - major = kernel_ver.split('.')[0] - sha256_kernel, _ = nix_prefetch_url(f"mirror://kernel/linux/kernel/v{major}.x/linux-{kernel_ver}.tar.xz") + kernel_ver = re.sub( + r"v?(.*)(-hardened[\d]+)$", r"\1", release_info.release.tag_name + ) + major = kernel_ver.split(".")[0] + sha256_kernel, _ = nix_prefetch_url( + f"mirror://kernel/linux/kernel/v{major}.x/linux-{kernel_ver}.tar.xz" + ) return Patch( patch=PatchData(name=patch_filename, url=patch_url, sha256=sha256, extra=extra), version=kernel_ver, - sha256=sha256_kernel + sha256=sha256_kernel, ) -def normalize_kernel_version(version_str: str) -> list[str|int]: +def normalize_kernel_version(version_str: str) -> list[str | int]: # There have been two variants v6.10[..] 
and 6.10[..], drop the v - version_str_without_v = version_str[1:] if not version_str[0].isdigit() else version_str + version_str_without_v = ( + version_str[1:] if not version_str[0].isdigit() else version_str + ) - version: list[str|int] = [] + version: list[str | int] = [] - for component in re.split(r'\.|\-', version_str_without_v): + for component in re.split(r"\.|\-", version_str_without_v): try: version.append(int(component)) except ValueError: @@ -174,7 +189,7 @@ def version_string(version: Version) -> str: return ".".join(str(component) for component in version) -def major_kernel_version_key(kernel_version: list[int|str]) -> str: +def major_kernel_version_key(kernel_version: list[int | str]) -> str: return version_string(kernel_version[:-1]) @@ -211,7 +226,9 @@ def commit_patches(*, kernel_key: Version, message: str) -> None: if version != "testing" } - latest_lts = sorted(ver for ver, meta in kernels.items() if meta.get("lts", False))[-1] + latest_lts = sorted(ver for ver, meta in kernels.items() if meta.get("lts", False))[ + -1 + ] keys = sorted(kernels.keys()) latest_release = keys[-1] fallback = keys[-2] @@ -268,7 +285,7 @@ def commit_patches(*, kernel_key: Version, message: str) -> None: version_str = release.tag_name name = f"linux-hardened-{version_str}" - old_version: Optional[list[int|str]] = None + old_version: Optional[list[int | str]] = None old_version_str: Optional[str] = None update: bool try: diff --git a/pkgs/os-specific/linux/kernel/update-mainline.py b/pkgs/os-specific/linux/kernel/update-mainline.py index 9f61236e67fa4..e4047167f9a4c 100755 --- a/pkgs/os-specific/linux/kernel/update-mainline.py +++ b/pkgs/os-specific/linux/kernel/update-mainline.py @@ -113,13 +113,12 @@ def main(): releases = release_table.find_all("tr") parsed_releases = [ - parsed for release in releases - if (parsed := parse_release(release)) is not None + parsed for release in releases if (parsed := parse_release(release)) is not None ] all_kernels = json.load(VERSIONS_FILE.open()) oldest_branch = get_oldest_branch(all_kernels) - for (branch, kernels) in groupby(parsed_releases, lambda kernel: kernel.branch): + for branch, kernels in groupby(parsed_releases, lambda kernel: kernel.branch): kernel = max(kernels, key=lambda kernel: kernel.parsed_version) nixpkgs_branch = branch.replace(".", "_") @@ -131,16 +130,13 @@ def main(): if predates_oldest_branch(oldest_branch, kernel.branch): print( f"{kernel.branch} is too old and not supported anymore, skipping...", - file=sys.stderr + file=sys.stderr, ) continue if old_version is None: if kernel.eol: - print( - f"{kernel.branch} is EOL, not adding...", - file=sys.stderr - ) + print(f"{kernel.branch} is EOL, not adding...", file=sys.stderr) continue message = f"linux_{nixpkgs_branch}: init at {kernel.version}" diff --git a/pkgs/os-specific/linux/kernel/update-zen.py b/pkgs/os-specific/linux/kernel/update-zen.py index 5fe19ef8cb846..632edcf44a789 100755 --- a/pkgs/os-specific/linux/kernel/update-zen.py +++ b/pkgs/os-specific/linux/kernel/update-zen.py @@ -17,45 +17,51 @@ def panic(exc): DIR = os.path.dirname(os.path.abspath(__file__)) -HEADERS = {'Accept': 'application/vnd.github.v3+json'} +HEADERS = {"Accept": "application/vnd.github.v3+json"} def github_api_request(endpoint): - base_url = 'https://api.github.com/' + base_url = "https://api.github.com/" request = Request(base_url + endpoint, headers=HEADERS) with urlopen(request) as http_response: - return json.loads(http_response.read().decode('utf-8')) + return 
json.loads(http_response.read().decode("utf-8")) def get_commit_date(repo, sha): - url = f'https://api.github.com/repos/{repo}/commits/{sha}' + url = f"https://api.github.com/repos/{repo}/commits/{sha}" request = Request(url, headers=HEADERS) with urlopen(request) as http_response: commit = json.loads(http_response.read().decode()) - date = commit['commit']['committer']['date'].rstrip('Z') + date = commit["commit"]["committer"]["date"].rstrip("Z") date = datetime.fromisoformat(date).date().isoformat() - return 'unstable-' + date + return "unstable-" + date def nix_prefetch_git(url, rev): """Prefetches the requested Git revision (incl. submodules) of the given repository URL.""" - print(f'nix-prefetch-git {url} {rev}') - out = subprocess.check_output([ - 'nix-prefetch-git', '--quiet', - '--url', url, - '--rev', rev, - '--fetch-submodules']) - return json.loads(out)['sha256'] + print(f"nix-prefetch-git {url} {rev}") + out = subprocess.check_output( + [ + "nix-prefetch-git", + "--quiet", + "--url", + url, + "--rev", + rev, + "--fetch-submodules", + ] + ) + return json.loads(out)["sha256"] def nix_prefetch_url(url, unpack=False): """Prefetches the content of the given URL.""" - print(f'nix-prefetch-url {url}') - options = ['--type', 'sha256'] + print(f"nix-prefetch-url {url}") + options = ["--type", "sha256"] if unpack: - options += ['--unpack'] - out = subprocess.check_output(['nix-prefetch-url'] + options + [url]) - return out.decode('utf-8').rstrip() + options += ["--unpack"] + out = subprocess.check_output(["nix-prefetch-url"] + options + [url]) + return out.decode("utf-8").rstrip() def update_file(relpath, variant, version, suffix, sha256): @@ -64,27 +70,30 @@ def update_file(relpath, variant, version, suffix, sha256): for line in f: result = line result = re.sub( - fr'^ version = ".+"; # {variant}', + rf'^ version = ".+"; # {variant}', f' version = "{version}"; # {variant}', - result) + result, + ) result = re.sub( - fr'^ suffix = ".+"; # {variant}', + rf'^ suffix = ".+"; # {variant}', f' suffix = "{suffix}"; # {variant}', - result) + result, + ) result = re.sub( - fr'^ sha256 = ".+"; # {variant}', + rf'^ sha256 = ".+"; # {variant}', f' sha256 = "{sha256}"; # {variant}', - result) - print(result, end='') + result, + ) + print(result, end="") def read_file(relpath, variant): file_path = os.path.join(DIR, relpath) - re_version = re.compile(fr'^\s*version = "(.+)"; # {variant}') - re_suffix = re.compile(fr'^\s*suffix = "(.+)"; # {variant}') + re_version = re.compile(rf'^\s*version = "(.+)"; # {variant}') + re_suffix = re.compile(rf'^\s*suffix = "(.+)"; # {variant}') version = None suffix = None - with fileinput.FileInput(file_path, mode='r') as f: + with fileinput.FileInput(file_path, mode="r") as f: for line in f: version_match = re_version.match(line) if version_match: @@ -107,16 +116,18 @@ def read_file(relpath, variant): variant = sys.argv[1] if variant not in ("zen", "lqx"): panic(f"Unexepected variant instead of 'zen' or 'lqx': {sys.argv[1]}") - pattern = re.compile(fr"v(\d+\.\d+\.?\d*)-({variant}\d+)") - zen_tags = github_api_request('repos/zen-kernel/zen-kernel/releases') + pattern = re.compile(rf"v(\d+\.\d+\.?\d*)-({variant}\d+)") + zen_tags = github_api_request("repos/zen-kernel/zen-kernel/releases") for tag in zen_tags: - zen_match = pattern.match(tag['tag_name']) + zen_match = pattern.match(tag["tag_name"]) if zen_match: zen_tag = zen_match.group(0) zen_version = zen_match.group(1) zen_suffix = zen_match.group(2) break - old_version, old_suffix = 
read_file('zen-kernels.nix', variant) + old_version, old_suffix = read_file("zen-kernels.nix", variant) if old_version != zen_version or old_suffix != zen_suffix: - zen_hash = nix_prefetch_git('https://github.com/zen-kernel/zen-kernel.git', zen_tag) - update_file('zen-kernels.nix', variant, zen_version, zen_suffix, zen_hash) + zen_hash = nix_prefetch_git( + "https://github.com/zen-kernel/zen-kernel.git", zen_tag + ) + update_file("zen-kernels.nix", variant, zen_version, zen_suffix, zen_hash) diff --git a/pkgs/servers/asterisk/update.py b/pkgs/servers/asterisk/update.py index 8a127be3952e6..0692462d1eea0 100755 --- a/pkgs/servers/asterisk/update.py +++ b/pkgs/servers/asterisk/update.py @@ -11,7 +11,11 @@ page = requests.get(URL) changelog = re.compile(r"^ChangeLog-\d+\.\d+\.\d+\.md$") -changelogs = [a.get_text() for a in BeautifulSoup(page.text, 'html.parser').find_all('a') if changelog.match(a.get_text())] +changelogs = [ + a.get_text() + for a in BeautifulSoup(page.text, "html.parser").find_all("a") + if changelog.match(a.get_text()) +] major_versions = {} for changelog in changelogs: v = version.parse(changelog.removeprefix("ChangeLog-").removesuffix(".md")) @@ -21,10 +25,7 @@ for mv in major_versions.keys(): v = max(major_versions[mv]) sha = requests.get(f"{URL}/asterisk-{v}.sha256").text.split()[0] - out["asterisk_" + str(mv)] = { - "version": str(v), - "sha256": sha - } + out["asterisk_" + str(mv)] = {"version": str(v), "sha256": sha} versions_path = Path(sys.argv[0]).parent / "versions.json" diff --git a/pkgs/servers/dict/wiktionary/latest_version.py b/pkgs/servers/dict/wiktionary/latest_version.py index 1aa767851ceed..d63307f5940c8 100644 --- a/pkgs/servers/dict/wiktionary/latest_version.py +++ b/pkgs/servers/dict/wiktionary/latest_version.py @@ -4,39 +4,44 @@ from os.path import abspath, dirname from urllib.request import urlopen + class WiktionaryLatestVersionParser(HTMLParser): def __init__(self, current_version, *args, **kwargs): self.latest_version = current_version super().__init__(*args, **kwargs) - def handle_starttag(self, tag, attrs): - if tag != 'a': + if tag != "a": return - href = dict(attrs)['href'][0:-1] - if href == 'latest': + href = dict(attrs)["href"][0:-1] + if href == "latest": return self.latest_version = max(self.latest_version, href) -def nix_prefetch_url(url, algo='sha256'): +def nix_prefetch_url(url, algo="sha256"): """Prefetches the content of the given URL.""" - print(f'nix-prefetch-url {url}') - out = subprocess.check_output(['nix-prefetch-url', '--type', algo, url]) + print(f"nix-prefetch-url {url}") + out = subprocess.check_output(["nix-prefetch-url", "--type", algo, url]) return out.rstrip() -current_version = subprocess.check_output([ - 'nix', 'eval', '--raw', - '-f', dirname(abspath(__file__)) + '/../../../..', - 'dictdDBs.wiktionary.version', -]) +current_version = subprocess.check_output( + [ + "nix", + "eval", + "--raw", + "-f", + dirname(abspath(__file__)) + "/../../../..", + "dictdDBs.wiktionary.version", + ] +) parser = WiktionaryLatestVersionParser(current_version) -with urlopen('https://dumps.wikimedia.org/enwiktionary/') as resp: +with urlopen("https://dumps.wikimedia.org/enwiktionary/") as resp: parser.feed(resp.read()) print(parser.latest_version) diff --git a/pkgs/servers/dict/wiktionary/wiktionary2dict.py b/pkgs/servers/dict/wiktionary/wiktionary2dict.py index 03cd8538c2fae..ef7ce8245dfa2 100644 --- a/pkgs/servers/dict/wiktionary/wiktionary2dict.py +++ b/pkgs/servers/dict/wiktionary/wiktionary2dict.py @@ 
-7,55 +7,78 @@ import time import xml.sax + class Text: def __init__(self, s): self.s = s + def process(self): return s + class TemplateCall: def __init__(self): pass + def process(self): pass + class Template: def __init__(self): self.parts = [] + def append(self, part): self.parts.append(part) + def process(self): - return ''.join(x.process() for x in self.parts) + return "".join(x.process() for x in self.parts) + class Whitespace: def __init__(self, s): self.s = s -class OpenDouble: pass -class OpenTriple: pass -class CloseDouble: pass -class CloseTriple: pass + +class OpenDouble: + pass + + +class OpenTriple: + pass + + +class CloseDouble: + pass + + +class CloseTriple: + pass + class Equals: def __str__(self): return "=" + class Delimiter: def __init__(self, c): self.c = c + def __str__(self): return self.c + def Tokenise(s): s = str(s) stack = [] last = 0 i = 0 while i < len(s): - if s[i] == '{' and i+1 < len(s) and s[i+1] == '{': + if s[i] == "{" and i + 1 < len(s) and s[i + 1] == "{": if i > last: yield s[last:i] - if i+2 < len(s) and s[i+2] == '{': + if i + 2 < len(s) and s[i + 2] == "{": yield OpenTriple() stack.append(3) i += 3 @@ -64,7 +87,7 @@ def Tokenise(s): stack.append(2) i += 2 last = i - elif s[i] == '}' and i+1 < len(s) and s[i+1] == '}': + elif s[i] == "}" and i + 1 < len(s) and s[i + 1] == "}": if i > last: yield s[last:i] if len(stack) == 0: @@ -74,26 +97,26 @@ def Tokenise(s): yield CloseDouble() i += 2 stack.pop() - elif i+2 < len(s) and s[i+2] == '}': + elif i + 2 < len(s) and s[i + 2] == "}": yield CloseTriple() i += 3 stack.pop() else: raise SyntaxError() last = i - elif s[i] == ':' or s[i] == '|': + elif s[i] == ":" or s[i] == "|": if i > last: yield s[last:i] yield Delimiter(s[i]) i += 1 last = i - elif s[i] == '=': + elif s[i] == "=": if i > last: yield s[last:i] yield Equals() i += 1 last = i - #elif s[i] == ' ' or s[i] == '\t' or s[i] == '\n': + # elif s[i] == ' ' or s[i] == '\t' or s[i] == '\n': # if i > last: # yield s[last:i] # last = i @@ -107,6 +130,7 @@ def Tokenise(s): if i > last: yield s[last:i] + def processSub(templates, tokens, args): t = next(tokens) if not isinstance(t, str): @@ -114,7 +138,7 @@ def processSub(templates, tokens, args): name = t t = next(tokens) default = None - if isinstance(t, Delimiter) and t.c == '|': + if isinstance(t, Delimiter) and t.c == "|": default = "" while True: t = next(tokens) @@ -137,6 +161,7 @@ def processSub(templates, tokens, args): return "en" return "{{{%s}}}" % name + def processTemplateCall(templates, tokens, args): template = tokens.next().strip().lower() args = {} @@ -154,7 +179,7 @@ def processTemplateCall(templates, tokens, args): arg += processTemplateCall(templates, tokens, args) elif isinstance(t, OpenTriple): arg += processSub(templates, tokens, args) - elif isinstance(t, Delimiter) and t.c != '|': + elif isinstance(t, Delimiter) and t.c != "|": arg += str(t) else: break @@ -169,7 +194,7 @@ def processTemplateCall(templates, tokens, args): arg += processTemplateCall(templates, tokens, args) elif isinstance(t, OpenTriple): arg += processSub(templates, tokens, args) - elif isinstance(t, Delimiter) and t.c != '|': + elif isinstance(t, Delimiter) and t.c != "|": arg += str(t) else: break @@ -182,26 +207,26 @@ def processTemplateCall(templates, tokens, args): else: print("Unexpected:", t) raise SyntaxError - #print template, args - if template[0] == '#': + # print template, args + if template[0] == "#": if template == "#if": - if args['1'].strip(): - return args['2'] - elif '3' in args: - return 
args['3'] + if args["1"].strip(): + return args["2"] + elif "3" in args: + return args["3"] else: return "" elif template == "#ifeq": - if args['1'].strip() == args['2'].strip(): - return args['3'] - elif '4' in args: - return args['4'] + if args["1"].strip() == args["2"].strip(): + return args["3"] + elif "4" in args: + return args["4"] else: return "" elif template == "#ifexist": return "" elif template == "#switch": - sw = args['1'].strip() + sw = args["1"].strip() if sw in args: return args[sw] else: @@ -213,14 +238,15 @@ def processTemplateCall(templates, tokens, args): return "{{%s}}" % template return process(templates, templates[template], args) -def process(templates, s, args = {}): + +def process(templates, s, args={}): s = re.compile(r"", re.DOTALL).sub("", s) s = re.compile(r".*?", re.DOTALL).sub("", s) assert "" not in s - #s = re.sub(r"(.*?)(.*?)(.*)", r"\1", s) + # s = re.sub(r"(.*?)(.*?)(.*)", r"\1", s) s = re.compile(r"(.*?)", re.DOTALL).sub(r"\1", s) r = "" - #print list(Tokenise(s)) + # print list(Tokenise(s)) tokens = Tokenise(s) try: while True: @@ -235,19 +261,21 @@ def process(templates, s, args = {}): pass return r + def test(): templates = { - 'lb': "{{", - 'name-example': "I am a template example, my first name is '''{{{firstName}}}''' and my last name is '''{{{lastName}}}'''. You can reference my page at [[{{{lastName}}}, {{{firstName}}}]].", - 't': "start-{{{1|pqr}}}-end", - 't0': "start-{{{1}}}-end", - 't1': "start{{{1}}}endmoo", - 't2a1': "{{t2demo|a|{{{1}}}}}", - 't2a2': "{{t2demo|a|2={{{1}}}}}", - 't2demo': "start-{{{1}}}-middle-{{{2}}}-end", - 't5': "{{t2demo|{{{a}}}=b}}", - 't6': "t2demo|a", + "lb": "{{", + "name-example": "I am a template example, my first name is '''{{{firstName}}}''' and my last name is '''{{{lastName}}}'''. You can reference my page at [[{{{lastName}}}, {{{firstName}}}]].", + "t": "start-{{{1|pqr}}}-end", + "t0": "start-{{{1}}}-end", + "t1": "start{{{1}}}endmoo", + "t2a1": "{{t2demo|a|{{{1}}}}}", + "t2a2": "{{t2demo|a|2={{{1}}}}}", + "t2demo": "start-{{{1}}}-middle-{{{2}}}-end", + "t5": "{{t2demo|{{{a}}}=b}}", + "t6": "t2demo|a", } + def t(text, expected): print("text:", text) s = process(templates, text) @@ -255,8 +283,15 @@ def t(text, expected): print("got:", s) print("expected:", expected) sys.exit(1) - t("{{Name-example}}", "I am a template example, my first name is '''{{{firstName}}}''' and my last name is '''{{{lastName}}}'''. You can reference my page at [[{{{lastName}}}, {{{firstName}}}]].") - t("{{Name-example | firstName=John | lastName=Smith }}", "I am a template example, my first name is '''John''' and my last name is '''Smith'''. You can reference my page at [[Smith, John]].") + + t( + "{{Name-example}}", + "I am a template example, my first name is '''{{{firstName}}}''' and my last name is '''{{{lastName}}}'''. You can reference my page at [[{{{lastName}}}, {{{firstName}}}]].", + ) + t( + "{{Name-example | firstName=John | lastName=Smith }}", + "I am a template example, my first name is '''John''' and my last name is '''Smith'''. 
You can reference my page at [[Smith, John]].", + ) t("{{t0|a}}", "start-a-end") t("{{t0| }}", "start- -end") t("{{t0|}}", "start--end") @@ -279,208 +314,209 @@ def t(text, expected): t("{{T|{{T|a=b}}}}", "start-start-pqr-end-end") t("{{T|a=b}}", "start-pqr-end") t("{{T|1=a=b}}", "start-a=b-end") - #t("{{t1|{{lb}}tc}}}}", "start{{tcend}}") - #t("{{t2a1|1=x=y}}", "start-a-middle-{{{2}}}-end") - #t("{{t2a2|1=x=y}}", "start-a-middle-x=y-end") - #t("{{t5|a=2=d}}", "start-{{{1}}}-middle-d=b-end") - #t("{{ {{t6}} }}", "{{ t2demo|a }}") + # t("{{t1|{{lb}}tc}}}}", "start{{tcend}}") + # t("{{t2a1|1=x=y}}", "start-a-middle-{{{2}}}-end") + # t("{{t2a2|1=x=y}}", "start-a-middle-x=y-end") + # t("{{t5|a=2=d}}", "start-{{{1}}}-middle-d=b-end") + # t("{{ {{t6}} }}", "{{ t2demo|a }}") t("{{t|[[a|b]]}}", "start-b-end") t("{{t|[[a|b]] }}", "start-b -end") + Parts = { # Standard POS headers - 'noun': "n.", - 'Noun': "n.", - 'Noun 1': "n.", - 'Noun 2': "n.", - 'Verb': "v.", - 'Adjective': "adj.", - 'Adverb': "adv.", - 'Pronoun': "pron.", - 'Conjunction': "conj.", - 'Interjection': "interj.", - 'Preposition': "prep.", - 'Proper noun': "n.p.", - 'Proper Noun': "n.p.", - 'Article': "art.", - + "noun": "n.", + "Noun": "n.", + "Noun 1": "n.", + "Noun 2": "n.", + "Verb": "v.", + "Adjective": "adj.", + "Adverb": "adv.", + "Pronoun": "pron.", + "Conjunction": "conj.", + "Interjection": "interj.", + "Preposition": "prep.", + "Proper noun": "n.p.", + "Proper Noun": "n.p.", + "Article": "art.", # Standard non-POS level 3 headers - '{{acronym}}': "acr.", - 'Acronym': "acr.", - '{{abbreviation}}': "abbr.", - '[[Abbreviation]]': "abbr.", - 'Abbreviation': "abbr.", - '[[initialism]]': "init.", - '{{initialism}}': "init.", - 'Initialism': "init.", - 'Contraction': "cont.", - 'Prefix': "prefix", - 'Suffix': "suffix", - 'Symbol': "sym.", - 'Letter': "letter", - 'Idiom': "idiom", - 'Idioms': "idiom", - 'Phrase': "phrase", - + "{{acronym}}": "acr.", + "Acronym": "acr.", + "{{abbreviation}}": "abbr.", + "[[Abbreviation]]": "abbr.", + "Abbreviation": "abbr.", + "[[initialism]]": "init.", + "{{initialism}}": "init.", + "Initialism": "init.", + "Contraction": "cont.", + "Prefix": "prefix", + "Suffix": "suffix", + "Symbol": "sym.", + "Letter": "letter", + "Idiom": "idiom", + "Idioms": "idiom", + "Phrase": "phrase", # Debated POS level 3 headers - 'Number': "num.", - 'Numeral': "num.", - 'Cardinal number': "num.", - 'Ordinal number': "num.", - 'Cardinal numeral': "num.", - 'Ordinal numeral': "num.", - + "Number": "num.", + "Numeral": "num.", + "Cardinal number": "num.", + "Ordinal number": "num.", + "Cardinal numeral": "num.", + "Ordinal numeral": "num.", # Other headers in use - 'Personal pronoun': "pers.pron.", - 'Adjective/Adverb': "adj./adv.", - 'Proper adjective': "prop.adj.", - 'Determiner': "det.", - 'Demonstrative determiner': "dem.det.", - 'Clitic': "clitic", - 'Infix': "infix", - 'Counter': "counter", - 'Kanji': None, - 'Kanji reading': None, - 'Hiragana letter': None, - 'Katakana letter': None, - 'Pinyin': None, - 'Han character': None, - 'Hanzi': None, - 'Hanja': None, - 'Proverb': "prov.", - 'Expression': None, - 'Adjectival noun': None, - 'Quasi-adjective': None, - 'Particle': "part.", - 'Infinitive particle': "part.", - 'Possessive adjective': "poss.adj.", - 'Verbal prefix': "v.p.", - 'Postposition': "post.", - 'Prepositional article': "prep.art.", - 'Phrasal verb': "phr.v.", - 'Participle': "participle", - 'Interrogative auxiliary verb': "int.aux.v.", - 'Pronominal adverb': "pron.adv.", - 'Adnominal': "adn.", - 
'Abstract pronoun': "abs.pron.", - 'Conjunction particle': None, - 'Root': "root", - + "Personal pronoun": "pers.pron.", + "Adjective/Adverb": "adj./adv.", + "Proper adjective": "prop.adj.", + "Determiner": "det.", + "Demonstrative determiner": "dem.det.", + "Clitic": "clitic", + "Infix": "infix", + "Counter": "counter", + "Kanji": None, + "Kanji reading": None, + "Hiragana letter": None, + "Katakana letter": None, + "Pinyin": None, + "Han character": None, + "Hanzi": None, + "Hanja": None, + "Proverb": "prov.", + "Expression": None, + "Adjectival noun": None, + "Quasi-adjective": None, + "Particle": "part.", + "Infinitive particle": "part.", + "Possessive adjective": "poss.adj.", + "Verbal prefix": "v.p.", + "Postposition": "post.", + "Prepositional article": "prep.art.", + "Phrasal verb": "phr.v.", + "Participle": "participle", + "Interrogative auxiliary verb": "int.aux.v.", + "Pronominal adverb": "pron.adv.", + "Adnominal": "adn.", + "Abstract pronoun": "abs.pron.", + "Conjunction particle": None, + "Root": "root", # Non-standard, deprecated headers - 'Noun form': "n.", - 'Verb form': "v.", - 'Adjective form': "adj.form.", - 'Nominal phrase': "nom.phr.", - 'Noun phrase': "n. phrase", - 'Verb phrase': "v. phrase", - 'Transitive verb': "v.t.", - 'Intransitive verb': "v.i.", - 'Reflexive verb': "v.r.", - 'Cmavo': None, - 'Romaji': "rom.", - 'Hiragana': None, - 'Furigana': None, - 'Compounds': None, - + "Noun form": "n.", + "Verb form": "v.", + "Adjective form": "adj.form.", + "Nominal phrase": "nom.phr.", + "Noun phrase": "n. phrase", + "Verb phrase": "v. phrase", + "Transitive verb": "v.t.", + "Intransitive verb": "v.i.", + "Reflexive verb": "v.r.", + "Cmavo": None, + "Romaji": "rom.", + "Hiragana": None, + "Furigana": None, + "Compounds": None, # Other headers seen - 'Alternative forms': None, - 'Alternative spellings': None, - 'Anagrams': None, - 'Antonym': None, - 'Antonyms': None, - 'Conjugation': None, - 'Declension': None, - 'Declension and pronunciations': None, - 'Definite Article': "def.art.", - 'Definite article': "def.art.", - 'Demonstrative pronoun': "dem.pron.", - 'Derivation': None, - 'Derived expression': None, - 'Derived expressions': None, - 'Derived forms': None, - 'Derived phrases': None, - 'Derived terms': None, - 'Derived, Related terms': None, - 'Descendants': None, + "Alternative forms": None, + "Alternative spellings": None, + "Anagrams": None, + "Antonym": None, + "Antonyms": None, + "Conjugation": None, + "Declension": None, + "Declension and pronunciations": None, + "Definite Article": "def.art.", + "Definite article": "def.art.", + "Demonstrative pronoun": "dem.pron.", + "Derivation": None, + "Derived expression": None, + "Derived expressions": None, + "Derived forms": None, + "Derived phrases": None, + "Derived terms": None, + "Derived, Related terms": None, + "Descendants": None, #'Etymology': None, #'Etymology 1': None, #'Etymology 2': None, #'Etymology 3': None, #'Etymology 4': None, #'Etymology 5': None, - 'Examples': None, - 'External links': None, - '[[Gismu]]': None, - 'Gismu': None, - 'Homonyms': None, - 'Homophones': None, - 'Hyphenation': None, - 'Indefinite article': "art.", - 'Indefinite pronoun': "ind.pron.", - 'Indefinite Pronoun': "ind.pron.", - 'Indetermined pronoun': "ind.pron.", - 'Interrogative conjunction': "int.conj.", - 'Interrogative determiner': "int.det.", - 'Interrogative particle': "int.part.", - 'Interrogative pronoun': "int.pron.", - 'Legal expression': "legal", - 'Mass noun': "n.", - 'Miscellaneous': None, - 'Mutations': None, - 
'Noun and verb': "n/v.", - 'Other language': None, - 'Pinyin syllable': None, - 'Possessive determiner': "poss.det.", - 'Possessive pronoun': "poss.pron.", - 'Prepositional phrase': "prep.phr.", - 'Prepositional Pronoun': "prep.pron.", - 'Pronunciation': None, - 'Pronunciation 1': None, - 'Pronunciation 2': None, - 'Quotations': None, - 'References': None, - 'Reflexive pronoun': "refl.pron.", - 'Related expressions': None, - 'Related terms': None, - 'Related words': None, - 'Relative pronoun': "rel.pron.", - 'Saying': "saying", - 'See also': None, - 'Shorthand': None, - '[http://en.wikipedia.org/wiki/Shorthand Shorthand]': None, - 'Sister projects': None, - 'Spelling note': None, - 'Synonyms': None, - 'Translation': None, - 'Translations': None, - 'Translations to be checked': None, - 'Transliteration': None, - 'Trivia': None, - 'Usage': None, - 'Usage in English': None, - 'Usage notes': None, - 'Verbal noun': "v.n.", + "Examples": None, + "External links": None, + "[[Gismu]]": None, + "Gismu": None, + "Homonyms": None, + "Homophones": None, + "Hyphenation": None, + "Indefinite article": "art.", + "Indefinite pronoun": "ind.pron.", + "Indefinite Pronoun": "ind.pron.", + "Indetermined pronoun": "ind.pron.", + "Interrogative conjunction": "int.conj.", + "Interrogative determiner": "int.det.", + "Interrogative particle": "int.part.", + "Interrogative pronoun": "int.pron.", + "Legal expression": "legal", + "Mass noun": "n.", + "Miscellaneous": None, + "Mutations": None, + "Noun and verb": "n/v.", + "Other language": None, + "Pinyin syllable": None, + "Possessive determiner": "poss.det.", + "Possessive pronoun": "poss.pron.", + "Prepositional phrase": "prep.phr.", + "Prepositional Pronoun": "prep.pron.", + "Pronunciation": None, + "Pronunciation 1": None, + "Pronunciation 2": None, + "Quotations": None, + "References": None, + "Reflexive pronoun": "refl.pron.", + "Related expressions": None, + "Related terms": None, + "Related words": None, + "Relative pronoun": "rel.pron.", + "Saying": "saying", + "See also": None, + "Shorthand": None, + "[http://en.wikipedia.org/wiki/Shorthand Shorthand]": None, + "Sister projects": None, + "Spelling note": None, + "Synonyms": None, + "Translation": None, + "Translations": None, + "Translations to be checked": None, + "Transliteration": None, + "Trivia": None, + "Usage": None, + "Usage in English": None, + "Usage notes": None, + "Verbal noun": "v.n.", } PartsUsed = {} for p in list(Parts.keys()): PartsUsed[p] = 0 + def encode(s): r = e(s) assert r[1] == len(s) return r[0] + def dowikilink(m): a = m.group(1).split("|") if len(a) > 1: link = a[1] else: link = a[0] - if ':' in link: + if ":" in link: link = "" return link + seentemplates = {} + + def dotemplate(m): aa = m.group(1).split("|") args = {} @@ -493,21 +529,21 @@ def dotemplate(m): n += 1 args[n] = am.group(1) - #if aa[0] in seentemplates: + # if aa[0] in seentemplates: # seentemplates[aa[0]] += 1 - #else: + # else: # seentemplates[aa[0]] = 1 # print len(seentemplates), aa[0] - #print aa[0] + # print aa[0] - #if aa[0] not in Templates: + # if aa[0] not in Templates: # return "(unknown template %s)" % aa[0] - #body = Templates[aa[0]] - #body = re.sub(r".*?", "", body) - #assert "" not in body + # body = Templates[aa[0]] + # body = re.sub(r".*?", "", body) + # assert "" not in body ##body = re.sub(r"(.*?)(.*?)(.*)", r"\1", body) - #body = re.sub(r"(.*?)", r"\1", body) - #def dotemplatearg(m): + # body = re.sub(r"(.*?)", r"\1", body) + # def dotemplatearg(m): # ta = m.group(1).split("|") # if ta[0] in 
args: # return args[ta[0]] @@ -515,8 +551,9 @@ def dotemplate(m): # return ta[1] # else: # return "{{{%s}}}" % ta[0] - #body = re.sub(r"{{{(.*?)}}}", dotemplatearg, body) - #return dewiki(body) + # body = re.sub(r"{{{(.*?)}}}", dotemplatearg, body) + # return dewiki(body) + def doparserfunction(m): a = m.group(2).split("|") @@ -527,7 +564,8 @@ def doparserfunction(m): return a[3] return "" -def dewiki(body, indent = 0): + +def dewiki(body, indent=0): # process in this order: # {{{ }}} # <> <> @@ -535,10 +573,10 @@ def dewiki(body, indent = 0): # {{ }} # ''' ''' # '' '' - #body = wikimediatemplate.process(Templates, body) + # body = wikimediatemplate.process(Templates, body) body = re.sub(r"\[\[(.*?)\]\]", dowikilink, body) - #body = re.sub(r"{{(.*?)}}", dotemplate, body) - #body = re.sub(r"{{#(.*?):(.*?)}}", doparserfunction, body) + # body = re.sub(r"{{(.*?)}}", dotemplate, body) + # body = re.sub(r"{{#(.*?):(.*?)}}", doparserfunction, body) body = re.sub(r"'''(.*?)'''", r"\1", body) body = re.sub(r"''(.*?)''", r"\1", body) lines = body.split("\n") @@ -546,48 +584,61 @@ def dewiki(body, indent = 0): i = 0 while i < len(lines): if len(lines[i]) > 0 and lines[i][0] == "#": - if len(lines[i]) > 1 and lines[i][1] == '*': - wlines = textwrap.wrap(lines[i][2:].strip(), - initial_indent = " * ", - subsequent_indent = " ") - elif len(lines[i]) > 1 and lines[i][1] == ':': - wlines = textwrap.wrap(lines[i][2:].strip(), - initial_indent = " ", - subsequent_indent = " ") + if len(lines[i]) > 1 and lines[i][1] == "*": + wlines = textwrap.wrap( + lines[i][2:].strip(), + initial_indent=" * ", + subsequent_indent=" ", + ) + elif len(lines[i]) > 1 and lines[i][1] == ":": + wlines = textwrap.wrap( + lines[i][2:].strip(), + initial_indent=" ", + subsequent_indent=" ", + ) else: n += 1 - wlines = textwrap.wrap(str(n) + ". " + lines[i][1:].strip(), - subsequent_indent = " ") + wlines = textwrap.wrap( + str(n) + ". 
" + lines[i][1:].strip(), subsequent_indent=" " + ) elif len(lines[i]) > 0 and lines[i][0] == "*": n = 0 - wlines = textwrap.wrap(lines[i][1:].strip(), - initial_indent = "* ", - subsequent_indent = " ") + wlines = textwrap.wrap( + lines[i][1:].strip(), initial_indent="* ", subsequent_indent=" " + ) else: n = 0 wlines = textwrap.wrap(lines[i].strip()) if len(wlines) == 0: - wlines = [''] - lines[i:i+1] = wlines + wlines = [""] + lines[i : i + 1] = wlines i += len(wlines) - return ''.join(" "*(indent-1)+x+"\n" for x in lines) + return "".join(" " * (indent - 1) + x + "\n" for x in lines) + class WikiSection: def __init__(self, heading, body): self.heading = heading self.body = body - #self.lines = re.split("\n+", body.strip()) - #if len(self.lines) == 1 and len(self.lines[0]) == 0: + # self.lines = re.split("\n+", body.strip()) + # if len(self.lines) == 1 and len(self.lines[0]) == 0: # self.lines = [] self.children = [] + def __str__(self): - return "<%s:%i:%s>" % (self.heading, len(self.body or ""), ','.join([str(x) for x in self.children])) + return "<%s:%i:%s>" % ( + self.heading, + len(self.body or ""), + ",".join([str(x) for x in self.children]), + ) + def add(self, section): self.children.append(section) + def parse(word, text): headings = list(re.finditer("^(=+)\s*(.*?)\s*=+\n", text, re.MULTILINE)) - #print [x.group(1) for x in headings] + # print [x.group(1) for x in headings] doc = WikiSection(word, "") stack = [doc] for i, m in enumerate(headings): @@ -599,35 +650,40 @@ def parse(word, text): s = WikiSection(None, "") stack[-1].add(s) stack.append(s) - if i+1 < len(headings): - s = WikiSection(m.group(2), text[m.end(0):headings[i+1].start(0)].strip()) + if i + 1 < len(headings): + s = WikiSection( + m.group(2), text[m.end(0) : headings[i + 1].start(0)].strip() + ) else: - s = WikiSection(m.group(2), text[m.end(0):].strip()) + s = WikiSection(m.group(2), text[m.end(0) :].strip()) assert len(stack) == depth stack[-1].add(s) stack.append(s) - #while doc.heading is None and len(doc.lines) == 0 and len(doc.children) == 1: + # while doc.heading is None and len(doc.lines) == 0 and len(doc.children) == 1: # doc = doc.children[0] return doc + def formatFull(word, doc): def f(depth, section): if section.heading: - r = " "*(depth-1) + section.heading + "\n\n" + r = " " * (depth - 1) + section.heading + "\n\n" else: r = "" if section.body: - r += dewiki(section.body, depth+1)+"\n" - #r += "".join(" "*depth + x + "\n" for x in dewiki(section.body)) - #if len(section.lines) > 0: + r += dewiki(section.body, depth + 1) + "\n" + # r += "".join(" "*depth + x + "\n" for x in dewiki(section.body)) + # if len(section.lines) > 0: # r += "\n" for c in section.children: - r += f(depth+1, c) + r += f(depth + 1, c) return r + s = f(0, doc) s += "Ref: http://en.wiktionary.org/wiki/%s\n" % word return s + def formatNormal(word, doc): def f(depth, posdepth, section): r = "" @@ -635,29 +691,31 @@ def f(depth, posdepth, section): if not section.heading or section.heading.startswith("Etymology"): posdepth += 1 elif section.heading in Parts: - #p = Parts[section.heading] - #if p: + # p = Parts[section.heading] + # if p: # r += " "*(depth-1) + word + " (" + p + ")\n\n" - r += " "*(depth-1) + section.heading + "\n\n" + r += " " * (depth - 1) + section.heading + "\n\n" else: print("Unknown part: (%s) %s" % (word, section.heading), file=errors) return "" elif depth > posdepth: return "" elif section.heading: - r += " "*(depth-1) + section.heading + "\n\n" + r += " " * (depth - 1) + section.heading + "\n\n" if 
section.body: - r += dewiki(section.body, depth+1)+"\n" - #r += "".join(" "*depth + x + "\n" for x in dewiki(section.lines)) - #if len(section.lines) > 0: + r += dewiki(section.body, depth + 1) + "\n" + # r += "".join(" "*depth + x + "\n" for x in dewiki(section.lines)) + # if len(section.lines) > 0: # r += "\n" for c in section.children: - r += f(depth+1, posdepth, c) + r += f(depth + 1, posdepth, c) return r + s = f(0, 3, doc) s += "Ref: http://en.wiktionary.org/wiki/%s\n" % word return s + def formatBrief(word, doc): def f(depth, posdepth, section): if depth == posdepth: @@ -665,8 +723,8 @@ def f(depth, posdepth, section): if not section.heading or section.heading.startswith("Etymology"): posdepth += 1 elif section.heading in Parts: - #h = Parts[section.heading] - #if h: + # h = Parts[section.heading] + # if h: # h = "%s (%s)" % (word, h) pass stack.append([h, False]) @@ -675,36 +733,42 @@ def f(depth, posdepth, section): else: stack.append(["%h " + section.heading, False]) r = "" - #if section.heading: + # if section.heading: # r += " "*(depth-1) + section.heading + "\n" - body = ''.join(x+"\n" for x in section.body.split("\n") if len(x) > 0 and x[0] == '#') + body = "".join( + x + "\n" for x in section.body.split("\n") if len(x) > 0 and x[0] == "#" + ) if len(body) > 0: for i in range(len(stack)): if not stack[i][1]: if stack[i][0]: - r += " "*(i-1) + stack[i][0] + "\n" + r += " " * (i - 1) + stack[i][0] + "\n" stack[i][1] = True - r += dewiki(body, depth+1) + r += dewiki(body, depth + 1) for c in section.children: - r += f(depth+1, posdepth, c) + r += f(depth + 1, posdepth, c) stack.pop() return r + stack = [] s = f(0, 3, doc) s += "Ref: http://en.wiktionary.org/wiki/%s\n" % word return s + class WikiHandler(xml.sax.ContentHandler): def __init__(self): self.element = None self.page = None self.text = "" self.long = {} + def startElement(self, name, attrs): - #print "start", name, attrs + # print "start", name, attrs self.element = name + def endElement(self, name): - #print "end", name + # print "end", name if self.element == "text": if self.page: if self.page in self.long: @@ -714,8 +778,9 @@ def endElement(self, name): self.page = None self.text = "" self.element = None + def characters(self, content): - #print "characters", content + # print "characters", content if self.element == "title": if self.checkPage(content): self.page = content @@ -724,20 +789,26 @@ def characters(self, content): self.text += content if len(self.text) > 100000 and self.page not in self.long: self.long[self.page] = 1 + def checkPage(self, page): return False + def doPage(self, page, text): pass + class TemplateHandler(WikiHandler): def checkPage(self, page): return page.startswith("Template:") + def doPage(self, page, text): - Templates[page[page.find(':')+1:].lower()] = text + Templates[page[page.find(":") + 1 :].lower()] = text + class WordHandler(WikiHandler): def checkPage(self, page): - return ':' not in page + return ":" not in page + def doPage(self, page, text): m = re.match(r"#redirect\s*\[\[(.*?)\]\]", text, re.IGNORECASE) if m: @@ -745,7 +816,8 @@ def doPage(self, page, text): return doc = parse(page, text) out.write(formatBrief(page, doc)) - #print formatBrief(page, doc) + # print formatBrief(page, doc) + fn = sys.argv[1] info = """ This file was converted from the original database on: @@ -767,7 +839,9 @@ def doPage(self, page, text): f.close() f = os.popen("bunzip2 -c %s" % fn, "r") -out = os.popen("dictfmt -p wiktionary-en --utf8 --columns 0 -u http://en.wiktionary.org", "w") +out = 
os.popen( + "dictfmt -p wiktionary-en --utf8 --columns 0 -u http://en.wiktionary.org", "w" +) out.write("%%h English Wiktionary\n%s" % info) xml.sax.parse(f, WordHandler()) diff --git a/pkgs/servers/dict/wordnet_structures.py b/pkgs/servers/dict/wordnet_structures.py index e5c80b968fc23..0bb05c1b840e7 100755 --- a/pkgs/servers/dict/wordnet_structures.py +++ b/pkgs/servers/dict/wordnet_structures.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -#Copyright 2007 Sebastian Hagen +# Copyright 2007 Sebastian Hagen # This file is part of wordnet_tools. # wordnet_tools is free software; you can redistribute it and/or modify @@ -35,156 +35,177 @@ CAT_VERB = 3 category_map = { - 'n': CAT_NOUN, - 'v': CAT_VERB, - 'a': CAT_ADJECTIVE, - 's': CAT_ADJECTIVE, - 'r': CAT_ADVERB + "n": CAT_NOUN, + "v": CAT_VERB, + "a": CAT_ADJECTIVE, + "s": CAT_ADJECTIVE, + "r": CAT_ADVERB, } class WordIndex: - def __init__(self, lemma, category, ptrs, synsets, tagsense_count): - self.lemma = lemma - self.category = category - self.ptrs = ptrs - self.synsets = synsets - self.tagsense_count = tagsense_count - - @classmethod - def build_from_line(cls, line_data, synset_map): - line_split = line_data.split() - lemma = line_split[0] - category = category_map[line_split[1]] - synset_count = int(line_split[2],10) - ptr_count = int(line_split[3],10) - ptrs = [line_split[i] for i in range(3, 3+ptr_count)] - tagsense_count = int(line_split[5 + ptr_count],10) - synsets = [synset_map[int(line_split[i],10)] for i in range(6 + ptr_count, 6 + ptr_count + synset_count)] - return cls(lemma, category, ptrs, synsets, tagsense_count) - - @classmethod - def build_from_file(cls, f, synset_map, rv_base=None): - if (rv_base is None): - rv = {} - else: - rv = rv_base - - for line in f: - if (line.startswith(' ')): - continue - wi = cls.build_from_line(line, synset_map) - word = wi.lemma.lower() - if not (word in rv): - rv[word] = [] - rv[word].append(wi) - return rv - - def __repr__(self): - return '%s%s' % (self.__class__.__name__, (self.lemma, self.category, self.ptrs, self.synsets, self.tagsense_count)) + def __init__(self, lemma, category, ptrs, synsets, tagsense_count): + self.lemma = lemma + self.category = category + self.ptrs = ptrs + self.synsets = synsets + self.tagsense_count = tagsense_count + + @classmethod + def build_from_line(cls, line_data, synset_map): + line_split = line_data.split() + lemma = line_split[0] + category = category_map[line_split[1]] + synset_count = int(line_split[2], 10) + ptr_count = int(line_split[3], 10) + ptrs = [line_split[i] for i in range(3, 3 + ptr_count)] + tagsense_count = int(line_split[5 + ptr_count], 10) + synsets = [ + synset_map[int(line_split[i], 10)] + for i in range(6 + ptr_count, 6 + ptr_count + synset_count) + ] + return cls(lemma, category, ptrs, synsets, tagsense_count) + + @classmethod + def build_from_file(cls, f, synset_map, rv_base=None): + if rv_base is None: + rv = {} + else: + rv = rv_base + + for line in f: + if line.startswith(" "): + continue + wi = cls.build_from_line(line, synset_map) + word = wi.lemma.lower() + if not (word in rv): + rv[word] = [] + rv[word].append(wi) + return rv + + def __repr__(self): + return "%s%s" % ( + self.__class__.__name__, + (self.lemma, self.category, self.ptrs, self.synsets, self.tagsense_count), + ) class WordIndexDictFormatter(WordIndex): - category_map_rev = { - CAT_NOUN: 'n', - CAT_VERB: 'v', - CAT_ADJECTIVE: 'adj', - CAT_ADVERB: 'adv' - } - linesep = '\n' - LINE_WIDTH_MAX = 68 - prefix_fmtf_line_first = '%5s 1: ' - prefix_fmtn_line_first = ' ' - 
prefix_fmtf_line_nonfirst = '%5d: ' - prefix_fmtn_line_nonfirst = ' ' - - def dict_str(self): - tw = TextWrapper(width=self.LINE_WIDTH_MAX, - initial_indent=(self.prefix_fmtf_line_first % self.category_map_rev[self.category]), - subsequent_indent=self.prefix_fmtn_line_first) - - lines = (tw.wrap(self.synsets[0].dict_str())) - i = 2 - for synset in self.synsets[1:]: - tw = TextWrapper(width=self.LINE_WIDTH_MAX, - initial_indent=(self.prefix_fmtf_line_nonfirst % i), - subsequent_indent=self.prefix_fmtn_line_nonfirst) - lines.extend(tw.wrap(synset.dict_str())) - i += 1 - return self.linesep.join(lines) + category_map_rev = { + CAT_NOUN: "n", + CAT_VERB: "v", + CAT_ADJECTIVE: "adj", + CAT_ADVERB: "adv", + } + linesep = "\n" + LINE_WIDTH_MAX = 68 + prefix_fmtf_line_first = "%5s 1: " + prefix_fmtn_line_first = " " + prefix_fmtf_line_nonfirst = "%5d: " + prefix_fmtn_line_nonfirst = " " + + def dict_str(self): + tw = TextWrapper( + width=self.LINE_WIDTH_MAX, + initial_indent=( + self.prefix_fmtf_line_first % self.category_map_rev[self.category] + ), + subsequent_indent=self.prefix_fmtn_line_first, + ) + + lines = tw.wrap(self.synsets[0].dict_str()) + i = 2 + for synset in self.synsets[1:]: + tw = TextWrapper( + width=self.LINE_WIDTH_MAX, + initial_indent=(self.prefix_fmtf_line_nonfirst % i), + subsequent_indent=self.prefix_fmtn_line_nonfirst, + ) + lines.extend(tw.wrap(synset.dict_str())) + i += 1 + return self.linesep.join(lines) class Synset: - def __init__(self, offset, ss_type, words, ptrs, gloss, frames=()): - self.offset = offset - self.type = ss_type - self.words = words - self.ptrs = ptrs - self.gloss = gloss - self.frames = frames - self.comments = [] - - @classmethod - def build_from_line(cls, line_data): - line_split = line_data.split() - synset_offset = int(line_split[0],10) - ss_type = category_map[line_split[2]] - word_count = int(line_split[3],16) - words = [line_split[i] for i in range(4, 4 + word_count*2,2)] - ptr_count = int(line_split[4 + word_count*2],10) - ptrs = [(line_split[i], line_split[i+1], line_split[i+2], line_split[i+3]) for i in range(5 + word_count*2,4 + word_count*2 + ptr_count*4,4)] - - tok = line_split[5 + word_count*2 + ptr_count*4] - base = 6 + word_count*2 + ptr_count*4 - if (tok != '|'): - frame_count = int(tok, 10) - frames = [(int(line_split[i+1],10), int(line_split[i+2],16)) for i in range(base, base + frame_count*3, 3)] - base += frame_count*3 + 1 - else: - frames = [] - - line_split2 = line_data.split(None, base) - if (len(line_split2) < base): - gloss = None - else: - gloss = line_split2[-1] - - return cls(synset_offset, ss_type, words, ptrs, gloss, frames) - - @classmethod - def build_from_file(cls, f): - rv = {} - comments = [] - - for line in f: - if (line.startswith(' ')): - line_s = line.lstrip().rstrip('\n') - line_elements = line_s.split(None,1) - try: - int(line_elements[0]) - except ValueError: - continue - if (len(line_elements) == 1): - line_elements.append('') - comments.append(line_elements[1]) - continue - synset = cls.build_from_line(line.rstrip()) - rv[synset.offset] = synset - - return (rv, comments) - - def dict_str(self): - rv = self.gloss - if (len(self.words) > 1): - rv += ' [syn: %s]' % (', '.join([('{%s}' % word) for word in self.words])) - return rv - - def __repr__(self): - return '%s%s' % (self.__class__.__name__, (self.offset, self.type, self.words, self.ptrs, self.gloss, self.frames)) + def __init__(self, offset, ss_type, words, ptrs, gloss, frames=()): + self.offset = offset + self.type = ss_type + self.words = words + 
self.ptrs = ptrs + self.gloss = gloss + self.frames = frames + self.comments = [] + + @classmethod + def build_from_line(cls, line_data): + line_split = line_data.split() + synset_offset = int(line_split[0], 10) + ss_type = category_map[line_split[2]] + word_count = int(line_split[3], 16) + words = [line_split[i] for i in range(4, 4 + word_count * 2, 2)] + ptr_count = int(line_split[4 + word_count * 2], 10) + ptrs = [ + (line_split[i], line_split[i + 1], line_split[i + 2], line_split[i + 3]) + for i in range(5 + word_count * 2, 4 + word_count * 2 + ptr_count * 4, 4) + ] + + tok = line_split[5 + word_count * 2 + ptr_count * 4] + base = 6 + word_count * 2 + ptr_count * 4 + if tok != "|": + frame_count = int(tok, 10) + frames = [ + (int(line_split[i + 1], 10), int(line_split[i + 2], 16)) + for i in range(base, base + frame_count * 3, 3) + ] + base += frame_count * 3 + 1 + else: + frames = [] + + line_split2 = line_data.split(None, base) + if len(line_split2) < base: + gloss = None + else: + gloss = line_split2[-1] + + return cls(synset_offset, ss_type, words, ptrs, gloss, frames) + + @classmethod + def build_from_file(cls, f): + rv = {} + comments = [] + + for line in f: + if line.startswith(" "): + line_s = line.lstrip().rstrip("\n") + line_elements = line_s.split(None, 1) + try: + int(line_elements[0]) + except ValueError: + continue + if len(line_elements) == 1: + line_elements.append("") + comments.append(line_elements[1]) + continue + synset = cls.build_from_line(line.rstrip()) + rv[synset.offset] = synset + + return (rv, comments) + + def dict_str(self): + rv = self.gloss + if len(self.words) > 1: + rv += " [syn: %s]" % (", ".join([("{%s}" % word) for word in self.words])) + return rv + + def __repr__(self): + return "%s%s" % ( + self.__class__.__name__, + (self.offset, self.type, self.words, self.ptrs, self.gloss, self.frames), + ) class WordnetDict: - db_info_fmt = '''This file was converted from the original database on: + db_info_fmt = """This file was converted from the original database on: %(conversion_datetime)s The original data is available from: @@ -194,127 +215,190 @@ class WordnetDict: additional restrictions are claimed. 
Please redistribute this changed version under the same conditions and restriction that apply to the original version.\n\n -%(wn_license)s''' - - datetime_fmt = '%Y-%m-%dT%H:%M:%S' - base64_map = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' - - def __init__(self, wn_url, desc_short, desc_long): - self.word_data = {} - self.wn_url = wn_url - self.desc_short = desc_short - self.desc_long = desc_long - self.wn_license = None - - def wn_dict_add(self, file_index, file_data): - file_data.seek(0) - file_index.seek(0) - (synsets, license_lines) = Synset.build_from_file(file_data) - WordIndexDictFormatter.build_from_file(file_index, synsets, self.word_data) - if (license_lines): - self.wn_license = '\n'.join(license_lines) + '\n' - - @classmethod - def base64_encode(cls, i): - """Encode a non-negative integer into a dictd compatible base64 string""" - if (i < 0): - raise ValueError('Value %r for i is negative' % (i,)) - r = 63 - e = 1 - while (r < i): - e += 1 - r = 64**e - 1 - - rv = '' - while (e > 0): - e -= 1 - d = math.floor(i / 64**e) - rv += cls.base64_map[d] - i = i % (64**e) - return rv - - @classmethod - def dict_entry_write(cls, file_index, file_data, key, entry, linesep='\n'): - """Write a single dict entry for to index and data files""" - entry_start = file_data.tell() - file_data.write(entry) - entry_len = len(entry) - file_index.write('%s\t%s\t%s%s' % (key, cls.base64_encode(entry_start), - cls.base64_encode(entry_len), linesep)) - - def dict_generate(self, file_index, file_data): - file_index.seek(0) - file_data.seek(0) - # The dictd file format is fairly iffy on the subject of special - # headwords: either dictd is buggy, or the manpage doesn't tell the whole - # story about the format. - # The upshot is that order of these entries in the index *matters*. - # Putting them at the beginning and in alphabetic order is afaict ok. - # Some other orders completely and quietly break the ability to look - # those headwords up. - # -- problem encountered with 1.10.2, at 2007-08-05. 
- file_data.write('\n') - wn_url = self.wn_url - conversion_datetime = datetime.datetime.now().strftime(self.datetime_fmt) - wn_license = self.wn_license - self.dict_entry_write(file_index, file_data, '00-database-info', '00-database-info\n%s\n' % (self.db_info_fmt % vars())) - self.dict_entry_write(file_index, file_data, '00-database-long', '00-database-long\n%s\n' % self.desc_long) - self.dict_entry_write(file_index, file_data, '00-database-short', '00-database-short\n%s\n' % self.desc_short) - self.dict_entry_write(file_index, file_data, '00-database-url', '00-database-url\n%s\n' % self.wn_url) - - - words = list(self.word_data.keys()) - words.sort() - for word in words: - for wi in self.word_data[word]: - word_cs = word - # Use case-sensitivity information of first entry of first synset that - # matches this word case-insensitively - for synset in wi.synsets: - for ss_word in synset.words: - if (ss_word.lower() == word_cs.lower()): - word_cs = ss_word - break - else: - continue - break - else: - continue - break - - outstr = '' - for wi in self.word_data[word]: - outstr += wi.dict_str() + '\n' - - outstr = '%s%s%s' % (word_cs, wi.linesep, outstr) - self.dict_entry_write(file_index, file_data, word_cs, outstr, wi.linesep) - - file_index.truncate() - file_data.truncate() - - -if (__name__ == '__main__'): - import optparse - op = optparse.OptionParser(usage='usage: %prog [options] ( )+') - op.add_option('-i', '--outindex', dest='oi', default='wn.index', help='filename of index file to write to') - op.add_option('-d', '--outdata', dest='od', default='wn.dict', help='filename of data file to write to') - op.add_option('--wn_url', dest='wn_url', default='ftp://ftp.cogsci.princeton.edu/pub/wordnet/2.0', help='URL for wordnet sources') - op.add_option('--db_desc_short', dest='desc_short', default=' WordNet (r) 2.1 (2005)', help='short dict DB description') - op.add_option('--db_desc_long', dest='desc_long', default=' WordNet (r): A Lexical Database for English from the\n Cognitive Science Laboratory at Princeton University', help='long dict DB description') - - (options, args) = op.parse_args() - - wnd = WordnetDict(wn_url=options.wn_url, desc_short=options.desc_short, desc_long=options.desc_long) - - for i in range(0,len(args),2): - print('Opening index file %r...' % args[i]) - file_index = open(args[i]) - print('Opening data file %r...' % args[i+1]) - file_data = open(args[i+1]) - print('Parsing index file and data file...') - wnd.wn_dict_add(file_index, file_data) - - print('All input files parsed. Writing output to index file %r and data file %r.' 
% (options.oi, options.od)) - - wnd.dict_generate(open(options.oi, 'w'),open(options.od, 'w')) - print('All done.') +%(wn_license)s""" + + datetime_fmt = "%Y-%m-%dT%H:%M:%S" + base64_map = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + + def __init__(self, wn_url, desc_short, desc_long): + self.word_data = {} + self.wn_url = wn_url + self.desc_short = desc_short + self.desc_long = desc_long + self.wn_license = None + + def wn_dict_add(self, file_index, file_data): + file_data.seek(0) + file_index.seek(0) + (synsets, license_lines) = Synset.build_from_file(file_data) + WordIndexDictFormatter.build_from_file(file_index, synsets, self.word_data) + if license_lines: + self.wn_license = "\n".join(license_lines) + "\n" + + @classmethod + def base64_encode(cls, i): + """Encode a non-negative integer into a dictd compatible base64 string""" + if i < 0: + raise ValueError("Value %r for i is negative" % (i,)) + r = 63 + e = 1 + while r < i: + e += 1 + r = 64**e - 1 + + rv = "" + while e > 0: + e -= 1 + d = math.floor(i / 64**e) + rv += cls.base64_map[d] + i = i % (64**e) + return rv + + @classmethod + def dict_entry_write(cls, file_index, file_data, key, entry, linesep="\n"): + """Write a single dict entry for to index and data files""" + entry_start = file_data.tell() + file_data.write(entry) + entry_len = len(entry) + file_index.write( + "%s\t%s\t%s%s" + % ( + key, + cls.base64_encode(entry_start), + cls.base64_encode(entry_len), + linesep, + ) + ) + + def dict_generate(self, file_index, file_data): + file_index.seek(0) + file_data.seek(0) + # The dictd file format is fairly iffy on the subject of special + # headwords: either dictd is buggy, or the manpage doesn't tell the whole + # story about the format. + # The upshot is that order of these entries in the index *matters*. + # Putting them at the beginning and in alphabetic order is afaict ok. + # Some other orders completely and quietly break the ability to look + # those headwords up. + # -- problem encountered with 1.10.2, at 2007-08-05. 
+ file_data.write("\n") + wn_url = self.wn_url + conversion_datetime = datetime.datetime.now().strftime(self.datetime_fmt) + wn_license = self.wn_license + self.dict_entry_write( + file_index, + file_data, + "00-database-info", + "00-database-info\n%s\n" % (self.db_info_fmt % vars()), + ) + self.dict_entry_write( + file_index, + file_data, + "00-database-long", + "00-database-long\n%s\n" % self.desc_long, + ) + self.dict_entry_write( + file_index, + file_data, + "00-database-short", + "00-database-short\n%s\n" % self.desc_short, + ) + self.dict_entry_write( + file_index, + file_data, + "00-database-url", + "00-database-url\n%s\n" % self.wn_url, + ) + + words = list(self.word_data.keys()) + words.sort() + for word in words: + for wi in self.word_data[word]: + word_cs = word + # Use case-sensitivity information of first entry of first synset that + # matches this word case-insensitively + for synset in wi.synsets: + for ss_word in synset.words: + if ss_word.lower() == word_cs.lower(): + word_cs = ss_word + break + else: + continue + break + else: + continue + break + + outstr = "" + for wi in self.word_data[word]: + outstr += wi.dict_str() + "\n" + + outstr = "%s%s%s" % (word_cs, wi.linesep, outstr) + self.dict_entry_write(file_index, file_data, word_cs, outstr, wi.linesep) + + file_index.truncate() + file_data.truncate() + + +if __name__ == "__main__": + import optparse + + op = optparse.OptionParser( + usage="usage: %prog [options] ( )+" + ) + op.add_option( + "-i", + "--outindex", + dest="oi", + default="wn.index", + help="filename of index file to write to", + ) + op.add_option( + "-d", + "--outdata", + dest="od", + default="wn.dict", + help="filename of data file to write to", + ) + op.add_option( + "--wn_url", + dest="wn_url", + default="ftp://ftp.cogsci.princeton.edu/pub/wordnet/2.0", + help="URL for wordnet sources", + ) + op.add_option( + "--db_desc_short", + dest="desc_short", + default=" WordNet (r) 2.1 (2005)", + help="short dict DB description", + ) + op.add_option( + "--db_desc_long", + dest="desc_long", + default=" WordNet (r): A Lexical Database for English from the\n Cognitive Science Laboratory at Princeton University", + help="long dict DB description", + ) + + (options, args) = op.parse_args() + + wnd = WordnetDict( + wn_url=options.wn_url, + desc_short=options.desc_short, + desc_long=options.desc_long, + ) + + for i in range(0, len(args), 2): + print("Opening index file %r..." % args[i]) + file_index = open(args[i]) + print("Opening data file %r..." % args[i + 1]) + file_data = open(args[i + 1]) + print("Parsing index file and data file...") + wnd.wn_dict_add(file_index, file_data) + + print( + "All input files parsed. Writing output to index file %r and data file %r." + % (options.oi, options.od) + ) + + wnd.dict_generate(open(options.oi, "w"), open(options.od, "w")) + print("All done.") diff --git a/pkgs/servers/home-assistant/update-component-packages.py b/pkgs/servers/home-assistant/update-component-packages.py index 3667f70331bd0..4f0a32c0f5078 100755 --- a/pkgs/servers/home-assistant/update-component-packages.py +++ b/pkgs/servers/home-assistant/update-component-packages.py @@ -56,9 +56,7 @@ # Some dependencies are loaded dynamically at runtime, and are not # mentioned in the manifest files. 
EXTRA_COMPONENT_DEPS = { - "conversation": [ - "intent" - ], + "conversation": ["intent"], "default_config": [ "backup", ], @@ -76,7 +74,6 @@ } - def run_sync(cmd: List[str]) -> None: print(f"$ {' '.join(cmd)}") process = subprocess.run(cmd) @@ -100,7 +97,9 @@ def parse_components(version: str = "master"): with urlopen( f"https://github.com/home-assistant/home-assistant/archive/{version}.tar.gz" ) as response: - tarfile.open(fileobj=BytesIO(response.read())).extractall(tmp, filter="data") + tarfile.open(fileobj=BytesIO(response.read())).extractall( + tmp, filter="data" + ) # Use part of a script from the Home Assistant codebase core_path = os.path.join(tmp, f"core-{version}") @@ -110,6 +109,7 @@ def parse_components(version: str = "master"): sys.path.append(core_path) from script.hassfest.model import Config, Integration # type: ignore + config = Config( root=pathlib.Path(core_path), specific_integrations=None, @@ -128,7 +128,9 @@ def parse_components(version: str = "master"): # Recursively get the requirements of a component and its dependencies -def get_reqs(components: Dict[str, Dict[str, Any]], component: str, processed: Set[str]) -> Set[str]: +def get_reqs( + components: Dict[str, Dict[str, Any]], component: str, processed: Set[str] +) -> Set[str]: requirements = set(components[component].get("requirements", [])) deps = components[component].get("dependencies", []) deps.extend(components[component].get("after_dependencies", [])) @@ -173,7 +175,9 @@ def dump_packages() -> Dict[str, Dict[str, str]]: "-qa", "-A", PKG_SET, - "--arg", "config", "{ allowAliases = false; }", + "--arg", + "config", + "{ allowAliases = false; }", "--json", ] ) @@ -207,7 +211,9 @@ def name_to_attr_path(req: str, packages: Dict[str, Dict[str, str]]) -> Optional return None -def get_pkg_version(attr_path: str, packages: Dict[str, Dict[str, str]]) -> Optional[str]: +def get_pkg_version( + attr_path: str, packages: Dict[str, Dict[str, str]] +) -> Optional[str]: pkg = packages.get(attr_path, None) if not pkg: return None @@ -236,8 +242,8 @@ def main() -> None: # Split package name and extra requires extras = [] if name.endswith("]"): - extras = name[name.find("[")+1:name.find("]")].split(",") - name = name[:name.find("[")] + extras = name[name.find("[") + 1 : name.find("]")].split(",") + name = name[: name.find("[")] attr_path = name_to_attr_path(name, packages) if attr_path: if our_version := get_pkg_version(attr_path, packages): @@ -246,24 +252,33 @@ def main() -> None: try: Version.parse(our_version) except InvalidVersion: - print(f"Attribute {attr_name} has invalid version specifier {our_version}", file=sys.stderr) + print( + f"Attribute {attr_name} has invalid version specifier {our_version}", + file=sys.stderr, + ) # allow specifying that our unstable version is newer than some version - if newer_than_version := OUR_VERSION_IS_NEWER_THAN.get(attr_name): - attr_outdated = Version.parse(newer_than_version) < Version.parse(required_version) + if newer_than_version := OUR_VERSION_IS_NEWER_THAN.get( + attr_name + ): + attr_outdated = Version.parse( + newer_than_version + ) < Version.parse(required_version) else: attr_outdated = True else: - attr_outdated = Version.parse(our_version) < Version.parse(required_version) + attr_outdated = Version.parse(our_version) < Version.parse( + required_version + ) finally: if attr_outdated: outdated[attr_name] = { - 'wanted': required_version, - 'current': our_version + "wanted": required_version, + "current": our_version, } if attr_path is not None: # Add attribute 
path without "python3Packages." prefix - pname = attr_path[len(PKG_SET + "."):] + pname = attr_path[len(PKG_SET + ".") :] attr_paths.append(pname) for extra in extras: # Check if package advertises extra requirements @@ -298,7 +313,9 @@ def main() -> None: f.write(f" # missing inputs: {' '.join(sorted(missing))}") f.write("\n") f.write(" };\n") - f.write(" # components listed in tests/components for which all dependencies are packaged\n") + f.write( + " # components listed in tests/components for which all dependencies are packaged\n" + ) f.write(" supportedComponentsWithTests = [\n") for component, deps in build_inputs.items(): available, extras, missing = deps @@ -309,11 +326,14 @@ def main() -> None: run_sync(["nixfmt", outpath]) - supported_components = reduce(lambda n, c: n + (build_inputs[c][2] == []), - components.keys(), 0) + supported_components = reduce( + lambda n, c: n + (build_inputs[c][2] == []), components.keys(), 0 + ) total_components = len(components) - print(f"{supported_components} / {total_components} components supported, " - f"i.e. {supported_components / total_components:.2%}") + print( + f"{supported_components} / {total_components} components supported, " + f"i.e. {supported_components / total_components:.2%}" + ) if outdated: table = Table(title="Outdated dependencies") @@ -321,7 +341,7 @@ def main() -> None: table.add_column("Current") table.add_column("Wanted") for package, version in sorted(outdated.items()): - table.add_row(package, version['current'], version['wanted']) + table.add_row(package, version["current"], version["wanted"]) console = Console() console.print(table) diff --git a/pkgs/servers/home-assistant/update.py b/pkgs/servers/home-assistant/update.py index c75bbc432aa30..f508511a25cd4 100755 --- a/pkgs/servers/home-assistant/update.py +++ b/pkgs/servers/home-assistant/update.py @@ -14,11 +14,17 @@ from aiohttp import ClientSession from packaging.version import Version -ROOT: Final = check_output([ - "git", - "rev-parse", - "--show-toplevel", -]).decode().strip() +ROOT: Final = ( + check_output( + [ + "git", + "rev-parse", + "--show-toplevel", + ] + ) + .decode() + .strip() +) def run_sync(cmd: List[str]) -> None: @@ -32,9 +38,7 @@ def run_sync(cmd: List[str]) -> None: async def check_async(cmd: List[str]) -> str: print(f"$ {' '.join(cmd)}") process = await asyncio.create_subprocess_exec( - *cmd, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE + *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE ) stdout, stderr = await process.communicate() @@ -73,8 +77,7 @@ def __enter__(self): def get_exact_match(self, attr: str, value: str): matches = re.findall( - rf'{re.escape(attr)}\s+=\s+\"?{re.escape(value)}\"?', - self.text + rf"{re.escape(attr)}\s+=\s+\"?{re.escape(value)}\"?", self.text ) n = len(matches) @@ -95,6 +98,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): with open(self.path, "w") as handle: handle.write(self.text) + class Nurl: @classmethod async def prefetch(cls, url: str, version: str, *extra_args: str) -> str: @@ -112,7 +116,8 @@ class Nix: base_cmd: Final = [ "nix", "--show-trace", - "--extra-experimental-features", "nix-command" + "--extra-experimental-features", + "nix-command", ] @classmethod @@ -121,12 +126,7 @@ async def _run(cls, args: List[str]) -> Optional[str]: @classmethod async def eval(cls, expr: str) -> Union[List, Dict, int, float, str, bool]: - response = await cls._run([ - "eval", - "-f", f"{ROOT}/default.nix", - "--json", - expr - ]) + response = await cls._run(["eval", "-f", 
f"{ROOT}/default.nix", "--json", expr]) if response is None: raise RuntimeError("Nix eval expression returned no response") try: @@ -136,12 +136,7 @@ async def eval(cls, expr: str) -> Union[List, Dict, int, float, str, bool]: @classmethod async def hash_to_sri(cls, algorithm: str, value: str) -> Optional[str]: - return await cls._run([ - "hash", - "to-sri", - "--type", algorithm, - value - ]) + return await cls._run(["hash", "to-sri", "--type", algorithm, value]) class HomeAssistant: @@ -149,9 +144,7 @@ def __init__(self, session: ClientSession): self._session = session async def get_latest_core_version( - self, - owner: str = "home-assistant", - repo: str = "core" + self, owner: str = "home-assistant", repo: str = "core" ) -> str: async with self._session.get( f"https://api.github.com/repos/{owner}/{repo}/releases/latest" @@ -162,11 +155,7 @@ async def get_latest_core_version( except KeyError: raise RuntimeError("No tag name in response document") - - async def get_latest_frontend_version( - self, - core_version: str - ) -> str: + async def get_latest_frontend_version(self, core_version: str) -> str: async with self._session.get( f"https://raw.githubusercontent.com/home-assistant/core/{core_version}/homeassistant/components/frontend/manifest.json" ) as response: @@ -187,18 +176,19 @@ async def get_latest_frontend_version( _, version = requirement.split("==", maxsplit=1) return str(version) else: - raise RuntimeError( - "Found no version specifier for frontend package" - ) - + raise RuntimeError("Found no version specifier for frontend package") async def update_core(self, old_version: str, new_version: str) -> None: old_sdist_hash = str(await Nix.eval("home-assistant.sdist.outputHash")) - new_sdist_hash = await Nurl.prefetch("https://pypi.org/project/homeassistant/", new_version) + new_sdist_hash = await Nurl.prefetch( + "https://pypi.org/project/homeassistant/", new_version + ) print(f"sdist: {old_sdist_hash} -> {new_sdist_hash}") old_git_hash = str(await Nix.eval("home-assistant.src.outputHash")) - new_git_hash = await Nurl.prefetch("https://github.com/home-assistant/core/", new_version) + new_git_hash = await Nurl.prefetch( + "https://github.com/home-assistant/core/", new_version + ) print(f"git: {old_git_hash} -> {new_git_hash}") with File("pkgs/servers/home-assistant/default.nix") as file: @@ -211,9 +201,15 @@ async def update_frontend(self, old_version: str, new_version: str) -> None: new_hash = await Nurl.prefetch( "https://pypi.org/project/home_assistant_frontend/", new_version, - "-A", "format", "wheel", - "-A", "dist", "py3", - "-A", "python", "py3" + "-A", + "format", + "wheel", + "-A", + "dist", + "py3", + "-A", + "python", + "py3", ) print(f"frontend: {old_hash} -> {new_hash}") @@ -222,9 +218,9 @@ async def update_frontend(self, old_version: str, new_version: str) -> None: file.substitute("hash", old_hash, new_hash) async def update_components(self): - await run_async([ - f"{ROOT}/pkgs/servers/home-assistant/update-component-packages.py" - ]) + await run_async( + [f"{ROOT}/pkgs/servers/home-assistant/update-component-packages.py"] + ) async def main(target_version: Optional[str] = None): @@ -257,6 +253,7 @@ async def main(target_version: Optional[str] = None): # https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown await asyncio.sleep(0) + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("version", nargs="?") diff --git a/pkgs/servers/web-apps/discourse/update.py 
b/pkgs/servers/web-apps/discourse/update.py index fd774b902274a..2793a6e3b57fa 100755 --- a/pkgs/servers/web-apps/discourse/update.py +++ b/pkgs/servers/web-apps/discourse/update.py @@ -40,11 +40,11 @@ class DiscourseVersion: def __init__(self, version: str): """Take either a tag or version number, calculate the other.""" - if version.startswith('v'): + if version.startswith("v"): self.tag = version - self.version = version.lstrip('v') + self.version = version.lstrip("v") else: - self.tag = 'v' + version + self.tag = "v" + version self.version = version self._version = Version(self.version) @@ -59,17 +59,19 @@ def __gt__(self, other: DiscourseVersion): class DiscourseRepo: - version_regex = re.compile(r'^v\d+\.\d+\.\d+(\.beta\d+)?$') + version_regex = re.compile(r"^v\d+\.\d+\.\d+(\.beta\d+)?$") _latest_commit_sha = None - def __init__(self, owner: str = 'discourse', repo: str = 'discourse'): + def __init__(self, owner: str = "discourse", repo: str = "discourse"): self.owner = owner self.repo = repo @property def versions(self) -> Iterable[str]: - r = requests.get(f'https://api.github.com/repos/{self.owner}/{self.repo}/git/refs/tags').json() - tags = [x['ref'].replace('refs/tags/', '') for x in r] + r = requests.get( + f"https://api.github.com/repos/{self.owner}/{self.repo}/git/refs/tags" + ).json() + tags = [x["ref"].replace("refs/tags/", "") for x in r] # filter out versions not matching version_regex versions = filter(self.version_regex.match, tags) @@ -80,9 +82,11 @@ def versions(self) -> Iterable[str]: @property def latest_commit_sha(self) -> str: if self._latest_commit_sha is None: - r = requests.get(f'https://api.github.com/repos/{self.owner}/{self.repo}/commits?per_page=1') + r = requests.get( + f"https://api.github.com/repos/{self.owner}/{self.repo}/commits?per_page=1" + ) r.raise_for_status() - self._latest_commit_sha = r.json()[0]['sha'] + self._latest_commit_sha = r.json()[0]["sha"] return self._latest_commit_sha @@ -93,14 +97,21 @@ def get_file(self, filepath, rev): :param str rev: the rev to fetch at :return: """ - r = requests.get(f'https://raw.githubusercontent.com/{self.owner}/{self.repo}/{rev}/{filepath}') + r = requests.get( + f"https://raw.githubusercontent.com/{self.owner}/{self.repo}/{rev}/{filepath}" + ) r.raise_for_status() return r.text def _get_build_lock_hash(): - nixpkgs_path = Path(__file__).parent / '../../../../' - output = subprocess.run(['nix-build', '-A', 'discourse'], text=True, cwd=nixpkgs_path, capture_output=True) + nixpkgs_path = Path(__file__).parent / "../../../../" + output = subprocess.run( + ["nix-build", "-A", "discourse"], + text=True, + cwd=nixpkgs_path, + capture_output=True, + ) # The line is of the form " got: sha256-xxx" lines = [i.strip() for i in output.stderr.splitlines()] new_hash_lines = [i.strip("got:").strip() for i in lines if i.startswith("got:")] @@ -118,24 +129,38 @@ def _get_build_lock_hash(): def _call_nix_update(pkg, version): """Call nix-update from nixpkgs root dir.""" - nixpkgs_path = Path(__file__).parent / '../../../../' - return subprocess.check_output(['nix-update', pkg, '--version', version], cwd=nixpkgs_path) + nixpkgs_path = Path(__file__).parent / "../../../../" + return subprocess.check_output( + ["nix-update", pkg, "--version", version], cwd=nixpkgs_path + ) def _nix_eval(expr: str): - nixpkgs_path = Path(__file__).parent / '../../../../' + nixpkgs_path = Path(__file__).parent / "../../../../" try: - output = subprocess.check_output(['nix-instantiate', 
'--strict', '--json', '--eval', '-E', f'(with import {nixpkgs_path} {{}}; {expr})'], text=True) + output = subprocess.check_output( + [ + "nix-instantiate", + "--strict", + "--json", + "--eval", + "-E", + f"(with import {nixpkgs_path} {{}}; {expr})", + ], + text=True, + ) except subprocess.CalledProcessError: return None return json.loads(output) def _get_current_package_version(pkg: str): - return _nix_eval(f'{pkg}.version') + return _nix_eval(f"{pkg}.version") -def _diff_file(filepath: str, old_version: DiscourseVersion, new_version: DiscourseVersion): +def _diff_file( + filepath: str, old_version: DiscourseVersion, new_version: DiscourseVersion +): repo = DiscourseRepo() current_dir = Path(__file__).parent @@ -144,47 +169,61 @@ def _diff_file(filepath: str, old_version: DiscourseVersion, new_version: Discou new = repo.get_file(filepath, new_version.tag) if old == new: - click.secho(f'{filepath} is unchanged', fg='green') + click.secho(f"{filepath} is unchanged", fg="green") return - with tempfile.NamedTemporaryFile(mode='w') as o, tempfile.NamedTemporaryFile(mode='w') as n: + with ( + tempfile.NamedTemporaryFile(mode="w") as o, + tempfile.NamedTemporaryFile(mode="w") as n, + ): o.write(old), n.write(new) width = shutil.get_terminal_size((80, 20)).columns diff_proc = subprocess.run( - ['diff', '--color=always', f'--width={width}', '-y', o.name, n.name], + ["diff", "--color=always", f"--width={width}", "-y", o.name, n.name], stdout=subprocess.PIPE, cwd=current_dir, - text=True + text=True, ) - click.secho(f'Diff for {filepath} ({old_version.version} -> {new_version.version}):', fg='bright_blue', bold=True) - click.echo(diff_proc.stdout + '\n') + click.secho( + f"Diff for {filepath} ({old_version.version} -> {new_version.version}):", + fg="bright_blue", + bold=True, + ) + click.echo(diff_proc.stdout + "\n") return def _remove_platforms(rubyenv_dir: Path): - for platform in ['arm64-darwin-20', 'x86_64-darwin-18', - 'x86_64-darwin-19', 'x86_64-darwin-20', - 'x86_64-linux', 'aarch64-linux']: - with open(rubyenv_dir / 'Gemfile.lock', 'r') as f: + for platform in [ + "arm64-darwin-20", + "x86_64-darwin-18", + "x86_64-darwin-19", + "x86_64-darwin-20", + "x86_64-linux", + "aarch64-linux", + ]: + with open(rubyenv_dir / "Gemfile.lock", "r") as f: for line in f: if platform in line: subprocess.check_output( - ['bundle', 'lock', '--remove-platform', platform], cwd=rubyenv_dir) + ["bundle", "lock", "--remove-platform", platform], + cwd=rubyenv_dir, + ) break @click_log.simple_verbosity_option(logger) - - @click.group() def cli(): pass @cli.command() -@click.argument('rev', default='latest') -@click.option('--reverse/--no-reverse', default=False, help='Print diffs from REV to current.') +@click.argument("rev", default="latest") +@click.option( + "--reverse/--no-reverse", default=False, help="Print diffs from REV to current." +) def print_diffs(rev, reverse): """Print out diffs for files used as templates for the NixOS module. @@ -197,22 +236,22 @@ def print_diffs(rev, reverse): 'latest'; defaults to 'latest'. 
""" - if rev == 'latest': + if rev == "latest": repo = DiscourseRepo() rev = repo.versions[0].tag - old_version = DiscourseVersion(_get_current_package_version('discourse')) + old_version = DiscourseVersion(_get_current_package_version("discourse")) new_version = DiscourseVersion(rev) if reverse: old_version, new_version = new_version, old_version - for f in ['config/nginx.sample.conf', 'config/discourse_defaults.conf']: + for f in ["config/nginx.sample.conf", "config/discourse_defaults.conf"]: _diff_file(f, old_version, new_version) @cli.command() -@click.argument('rev', default='latest') +@click.argument("rev", default="latest") def update(rev): """Update gem files and version. @@ -222,7 +261,7 @@ def update(rev): """ repo = DiscourseRepo() - if rev == 'latest': + if rev == "latest": version = repo.versions[0] else: version = DiscourseVersion(rev) @@ -232,24 +271,24 @@ def update(rev): rubyenv_dir = Path(__file__).parent / "rubyEnv" - for fn in ['Gemfile.lock', 'Gemfile']: - with open(rubyenv_dir / fn, 'w') as f: + for fn in ["Gemfile.lock", "Gemfile"]: + with open(rubyenv_dir / fn, "w") as f: f.write(repo.get_file(fn, version.tag)) # work around https://github.com/nix-community/bundix/issues/8 os.environ["BUNDLE_FORCE_RUBY_PLATFORM"] = "true" - subprocess.check_output(['bundle', 'lock'], cwd=rubyenv_dir) + subprocess.check_output(["bundle", "lock"], cwd=rubyenv_dir) _remove_platforms(rubyenv_dir) - subprocess.check_output(['bundix'], cwd=rubyenv_dir) + subprocess.check_output(["bundix"], cwd=rubyenv_dir) - _call_nix_update('discourse', version.version) + _call_nix_update("discourse", version.version) - old_pnpm_hash = _nix_eval('discourse.assets.pnpmDeps.outputHash') + old_pnpm_hash = _nix_eval("discourse.assets.pnpmDeps.outputHash") new_pnpm_hash = _get_build_lock_hash() if new_pnpm_hash is not None: click.echo(f"Updating yarn lock hash: {old_pnpm_hash} -> {new_pnpm_hash}") - with open(Path(__file__).parent / "default.nix", 'r+') as f: + with open(Path(__file__).parent / "default.nix", "r+") as f: content = f.read() content = content.replace(old_pnpm_hash, new_pnpm_hash) f.seek(0) @@ -258,7 +297,7 @@ def update(rev): @cli.command() -@click.argument('rev', default='latest') +@click.argument("rev", default="latest") def update_mail_receiver(rev): """Update discourse-mail-receiver. 
@@ -268,31 +307,31 @@ def update_mail_receiver(rev): """ repo = DiscourseRepo(repo="mail-receiver") - if rev == 'latest': + if rev == "latest": version = repo.versions[0] else: version = DiscourseVersion(rev) - _call_nix_update('discourse-mail-receiver', version.version) + _call_nix_update("discourse-mail-receiver", version.version) @cli.command() def update_plugins(): """Update plugins to their latest revision.""" plugins = [ - {'name': 'discourse-bbcode-color'}, - {'name': 'discourse-docs'}, - {'name': 'discourse-ldap-auth', 'owner': 'jonmbake'}, - {'name': 'discourse-prometheus'}, - {'name': 'discourse-saved-searches'}, - {'name': 'discourse-yearly-review'}, + {"name": "discourse-bbcode-color"}, + {"name": "discourse-docs"}, + {"name": "discourse-ldap-auth", "owner": "jonmbake"}, + {"name": "discourse-prometheus"}, + {"name": "discourse-saved-searches"}, + {"name": "discourse-yearly-review"}, ] for plugin in plugins: - fetcher = plugin.get('fetcher') or "fetchFromGitHub" - owner = plugin.get('owner') or "discourse" - name = plugin.get('name') - repo_name = plugin.get('repo_name') or name + fetcher = plugin.get("fetcher") or "fetchFromGitHub" + owner = plugin.get("owner") or "discourse" + name = plugin.get("name") + repo_name = plugin.get("repo_name") or name if fetcher == "fetchFromGitHub": url = f"https://github.com/{owner}/{repo_name}" @@ -307,13 +346,20 @@ def update_plugins(): # are incompatible with the packaged Discourse version repo_latest_commit = repo.latest_commit_sha try: - compatibility_spec = repo.get_file('.discourse-compatibility', repo_latest_commit) - versions = [(DiscourseVersion(discourse_version), plugin_rev.strip(' ')) - for [discourse_version, plugin_rev] - in [line.lstrip("< ").split(':') - for line - in compatibility_spec.splitlines() if line != '']] - discourse_version = DiscourseVersion(_get_current_package_version('discourse')) + compatibility_spec = repo.get_file( + ".discourse-compatibility", repo_latest_commit + ) + versions = [ + (DiscourseVersion(discourse_version), plugin_rev.strip(" ")) + for [discourse_version, plugin_rev] in [ + line.lstrip("< ").split(":") + for line in compatibility_spec.splitlines() + if line != "" + ] + ] + discourse_version = DiscourseVersion( + _get_current_package_version("discourse") + ) versions = list(filter(lambda ver: ver[0] >= discourse_version, versions)) if versions == []: rev = repo_latest_commit @@ -323,24 +369,34 @@ def update_plugins(): except requests.exceptions.HTTPError: rev = repo_latest_commit - filename = _nix_eval(f'builtins.unsafeGetAttrPos "src" discourse.plugins.{name}') + filename = _nix_eval( + f'builtins.unsafeGetAttrPos "src" discourse.plugins.{name}' + ) if filename is None: - filename = Path(__file__).parent / 'plugins' / name / 'default.nix' + filename = Path(__file__).parent / "plugins" / name / "default.nix" filename.parent.mkdir() has_ruby_deps = False - for line in repo.get_file('plugin.rb', rev).splitlines(): - if 'gem ' in line: + for line in repo.get_file("plugin.rb", rev).splitlines(): + if "gem " in line: has_ruby_deps = True break - with open(filename, 'w') as f: - f.write(textwrap.dedent(f""" + with open(filename, "w") as f: + f.write( + textwrap.dedent( + f""" {{ lib, mkDiscoursePlugin, fetchFromGitHub }}: mkDiscoursePlugin {{ - name = "{name}";"""[1:] + (""" - bundlerEnvArgs.gemdir = ./.;""" if has_ruby_deps else "") + f""" + name = "{name}";"""[1:] + + ( + """ + bundlerEnvArgs.gemdir = ./.;""" + if has_ruby_deps + else "" + ) + + f""" src = {fetcher} {{ owner = "{owner}"; 
repo = "{repo_name}"; @@ -353,40 +409,51 @@ def update_plugins(): license = lib.licenses.mit; # change to the correct license! description = ""; }}; - }}""")) + }}""" + ) + ) - all_plugins_filename = Path(__file__).parent / 'plugins' / 'all-plugins.nix' - with open(all_plugins_filename, 'r+') as f: + all_plugins_filename = Path(__file__).parent / "plugins" / "all-plugins.nix" + with open(all_plugins_filename, "r+") as f: content = f.read() pos = -1 - while content[pos] != '}': + while content[pos] != "}": pos -= 1 - content = content[:pos] + f' {name} = callPackage ./{name} {{}};' + os.linesep + content[pos:] + content = ( + content[:pos] + + f" {name} = callPackage ./{name} {{}};" + + os.linesep + + content[pos:] + ) f.seek(0) f.write(content) f.truncate() else: - filename = filename['file'] + filename = filename["file"] - prev_commit_sha = _nix_eval(f'discourse.plugins.{name}.src.rev') + prev_commit_sha = _nix_eval(f"discourse.plugins.{name}.src.rev") if prev_commit_sha == rev: - click.echo(f'Plugin {name} is already at the latest revision') + click.echo(f"Plugin {name} is already at the latest revision") continue - prev_hash = _nix_eval(f'discourse.plugins.{name}.src.outputHash') - new_hash = subprocess.check_output([ - "nurl", - "--fetcher", fetcher, - "--hash", - url, - rev, - ], text=True).strip("\n") + prev_hash = _nix_eval(f"discourse.plugins.{name}.src.outputHash") + new_hash = subprocess.check_output( + [ + "nurl", + "--fetcher", + fetcher, + "--hash", + url, + rev, + ], + text=True, + ).strip("\n") click.echo(f"Update {name}, {prev_commit_sha} -> {rev} in {filename}") - with open(filename, 'r+') as f: + with open(filename, "r+") as f: content = f.read() content = content.replace(prev_commit_sha, rev) content = content.replace(prev_hash, new_hash) @@ -397,36 +464,42 @@ def update_plugins(): rubyenv_dir = Path(filename).parent gemfile = rubyenv_dir / "Gemfile" version_file_regex = re.compile(r'.*File\.expand_path\("\.\./(.*)", __FILE__\)') - gemfile_text = '' - plugin_file = repo.get_file('plugin.rb', rev) - plugin_file = plugin_file.replace(",\n", ", ") # fix split lines + gemfile_text = "" + plugin_file = repo.get_file("plugin.rb", rev) + plugin_file = plugin_file.replace(",\n", ", ") # fix split lines for line in plugin_file.splitlines(): - if 'gem ' in line: - line = ','.join(filter(lambda x: ":require_name" not in x, line.split(','))) + if "gem " in line: + line = ",".join( + filter(lambda x: ":require_name" not in x, line.split(",")) + ) gemfile_text = gemfile_text + line + os.linesep version_file_match = version_file_regex.match(line) if version_file_match is not None: filename = version_file_match.groups()[0] content = repo.get_file(filename, rev) - with open(rubyenv_dir / filename, 'w') as f: + with open(rubyenv_dir / filename, "w") as f: f.write(content) if len(gemfile_text) > 0: if os.path.isfile(gemfile): os.remove(gemfile) - subprocess.check_output(['bundle', 'init'], cwd=rubyenv_dir) - os.chmod(gemfile, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IROTH) + subprocess.check_output(["bundle", "init"], cwd=rubyenv_dir) + os.chmod( + gemfile, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IROTH + ) - with open(gemfile, 'a') as f: + with open(gemfile, "a") as f: f.write(gemfile_text) - subprocess.check_output(['bundle', 'lock', '--add-platform', 'ruby'], cwd=rubyenv_dir) - subprocess.check_output(['bundle', 'lock', '--update'], cwd=rubyenv_dir) + subprocess.check_output( + ["bundle", "lock", "--add-platform", "ruby"], cwd=rubyenv_dir + ) + 
subprocess.check_output(["bundle", "lock", "--update"], cwd=rubyenv_dir) _remove_platforms(rubyenv_dir) - subprocess.check_output(['bundix'], cwd=rubyenv_dir) + subprocess.check_output(["bundix"], cwd=rubyenv_dir) -if __name__ == '__main__': +if __name__ == "__main__": cli() diff --git a/pkgs/servers/x11/xorg/update.py b/pkgs/servers/x11/xorg/update.py index 65a2fe190a17c..9c6e8a7a90bc8 100755 --- a/pkgs/servers/x11/xorg/update.py +++ b/pkgs/servers/x11/xorg/update.py @@ -87,7 +87,7 @@ print("Updating tarballs.list...") with open("./tarballs.list", "w") as f: - f.writelines(f'{tarball}\n' for tarball in updated_tarballs) + f.writelines(f"{tarball}\n" for tarball in updated_tarballs) print("Generating updated expr (slow)...") @@ -100,4 +100,6 @@ print("Committing...") subprocess.run(["git", "add", "default.nix", "tarballs.list"], check=True) -subprocess.run(["git", "commit", "-mxorg.*: update\n\n%s" % "\n".join(changes_text)], check=True) +subprocess.run( + ["git", "commit", "-mxorg.*: update\n\n%s" % "\n".join(changes_text)], check=True +) diff --git a/pkgs/tools/games/minecraft/optifine/update.py b/pkgs/tools/games/minecraft/optifine/update.py index 999f688be2850..7e7f449957cb8 100755 --- a/pkgs/tools/games/minecraft/optifine/update.py +++ b/pkgs/tools/games/minecraft/optifine/update.py @@ -8,53 +8,72 @@ import requests import subprocess + def nix_prefetch_sha256(name): - return subprocess.run(['nix-prefetch-url', '--type', 'sha256', 'https://optifine.net/download?f=' + name], capture_output=True, text=True).stdout.strip() + return subprocess.run( + [ + "nix-prefetch-url", + "--type", + "sha256", + "https://optifine.net/download?f=" + name, + ], + capture_output=True, + text=True, + ).stdout.strip() + # fetch download page sess = requests.session() -page = sess.get('https://optifine.net/downloads') +page = sess.get("https://optifine.net/downloads") tree = html.fromstring(page.content) # parse and extract main jar file names -href = tree.xpath('//tr[@class="downloadLine downloadLineMain"]/td[@class="colMirror"]/a/@href') -expr = re.compile('(OptiFine_)([0-9.]*)(.*)\.jar') -result = [ expr.search(x) for x in href ] +href = tree.xpath( + '//tr[@class="downloadLine downloadLineMain"]/td[@class="colMirror"]/a/@href' +) +expr = re.compile("(OptiFine_)([0-9.]*)(.*)\.jar") +result = [expr.search(x) for x in href] # format name, version and hash for each file catalogue = {} for i, r in enumerate(result): - index = r.group(1).lower() + r.group(2).replace('.', '_') + index = r.group(1).lower() + r.group(2).replace(".", "_") version = r.group(2) + r.group(3) - catalogue[index] = { - "version": version, - "sha256": nix_prefetch_sha256(r.group(0)) - } + catalogue[index] = {"version": version, "sha256": nix_prefetch_sha256(r.group(0))} # latest version should be the first entry if len(catalogue) > 0: - catalogue['optifine-latest'] = list(catalogue.values())[0] + catalogue["optifine-latest"] = list(catalogue.values())[0] # read previous versions d = os.path.dirname(os.path.abspath(__file__)) -with open(os.path.join(d, 'versions.json'), 'r') as f: +with open(os.path.join(d, "versions.json"), "r") as f: prev = json.load(f) # `maintainers/scripts/update.py` will extract stdout to write commit message # embed the commit message in json and print it -changes = [ { 'commitMessage': 'optifinePackages: update versions\n\n' } ] +changes = [{"commitMessage": "optifinePackages: update versions\n\n"}] # build a longest common subsequence, natural sorted by keys -for key, value in sorted({**prev, 
**catalogue}.items(), key=lambda item: [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', item[0])]): +for key, value in sorted( + {**prev, **catalogue}.items(), + key=lambda item: [ + int(s) if s.isdigit() else s for s in re.split(r"(\d+)", item[0]) + ], +): if key not in prev: - changes[0]['commitMessage'] += 'optifinePackages.{}: init at {}\n'.format(key, value['version']) - elif value['version'] != prev[key]['version']: - changes[0]['commitMessage'] += 'optifinePackages.{}: {} -> {}\n'.format(key, prev[key]['version'], value['version']) + changes[0]["commitMessage"] += "optifinePackages.{}: init at {}\n".format( + key, value["version"] + ) + elif value["version"] != prev[key]["version"]: + changes[0]["commitMessage"] += "optifinePackages.{}: {} -> {}\n".format( + key, prev[key]["version"], value["version"] + ) # print the changes in stdout print(json.dumps(changes)) # write catalogue to file -with open(os.path.join(d, 'versions.json'), 'w') as f: +with open(os.path.join(d, "versions.json"), "w") as f: json.dump(catalogue, f, indent=4) - f.write('\n') + f.write("\n") diff --git a/pkgs/tools/inputmethods/fcitx5/update.py b/pkgs/tools/inputmethods/fcitx5/update.py index 08b8f69386789..4b3a6a63b1506 100755 --- a/pkgs/tools/inputmethods/fcitx5/update.py +++ b/pkgs/tools/inputmethods/fcitx5/update.py @@ -5,39 +5,49 @@ import subprocess REPOS = [ - "libime", - "xcb-imdkit", - - "fcitx5", - "fcitx5-anthy", - "fcitx5-chewing", - "fcitx5-chinese-addons", - "fcitx5-configtool", - "fcitx5-gtk", - "fcitx5-hangul", - "fcitx5-lua", - "fcitx5-m17n", - "fcitx5-qt", - "fcitx5-rime", - "fcitx5-skk", - "fcitx5-table-extra", - "fcitx5-table-other", - "fcitx5-unikey" - ] + "libime", + "xcb-imdkit", + "fcitx5", + "fcitx5-anthy", + "fcitx5-chewing", + "fcitx5-chinese-addons", + "fcitx5-configtool", + "fcitx5-gtk", + "fcitx5-hangul", + "fcitx5-lua", + "fcitx5-m17n", + "fcitx5-qt", + "fcitx5-rime", + "fcitx5-skk", + "fcitx5-table-extra", + "fcitx5-table-other", + "fcitx5-unikey", +] OWNER = "fcitx" + def get_latest_tag(repo, owner=OWNER): - r = requests.get('https://api.github.com/repos/{}/{}/tags'.format(owner,repo)) + r = requests.get("https://api.github.com/repos/{}/{}/tags".format(owner, repo)) return r.json()[0].get("name") + def main(): for repo in REPOS: rev = get_latest_tag(repo) if repo == "fcitx5-qt": - subprocess.run(["nix-update", "--commit", "--version", rev, "qt6Packages.{}".format(repo)]) + subprocess.run( + [ + "nix-update", + "--commit", + "--version", + rev, + "qt6Packages.{}".format(repo), + ] + ) else: subprocess.run(["nix-update", "--commit", "--version", rev, repo]) + if __name__ == "__main__": - main () + main()