From a4499934d2971d9b052326e6d8ba4c45b4661801 Mon Sep 17 00:00:00 2001 From: luz paz Date: Tue, 21 Jun 2022 09:09:30 -0400 Subject: [PATCH] Fix various typos Found via `codespell -q 3 -S CHANGE_LOG -L acount,arry,asscii,ba,breal,documen,hge,inout,larg,nd,nin,splitted,te,tunnell,warmup,withs,wth` --- .../condarecipe.local/conda_build_config.yaml | 2 +- docs/dagmap/jquery.graphviz.svg.js | 2 +- docs/source/_ext/ghfiles.py | 2 +- docs/source/cuda/external-memory.rst | 2 +- docs/source/developer/contributing.rst | 2 +- docs/source/developer/hashing.rst | 2 +- docs/source/developer/literal.rst | 2 +- docs/source/developer/repomap.rst | 4 ++-- docs/source/proposals/jit-classes.rst | 6 +++--- docs/source/proposals/typing_recursion.rst | 4 ++-- docs/source/reference/envvars.rst | 2 +- docs/source/user/faq.rst | 4 ++-- docs/source/user/troubleshoot.rst | 2 +- mypy.ini | 2 +- numba/cext/listobject.c | 4 ++-- numba/cloudpickle/__init__.py | 2 +- numba/cloudpickle/cloudpickle.py | 6 +++--- numba/core/boxing.py | 2 +- numba/core/cgutils.py | 2 +- numba/core/codegen.py | 2 +- numba/core/controlflow.py | 2 +- numba/core/datamodel/packer.py | 2 +- numba/core/errors.py | 4 ++-- numba/core/externals.py | 2 +- numba/core/inline_closurecall.py | 2 +- numba/core/interpreter.py | 6 +++--- numba/core/ir_utils.py | 2 +- numba/core/removerefctpass.py | 3 +-- numba/core/runtime/nrtopt.py | 4 ++-- numba/core/targetconfig.py | 2 +- numba/core/typing/cffi_utils.py | 2 +- numba/core/typing/npydecl.py | 2 +- numba/core/typing/templates.py | 2 +- numba/core/utils.py | 2 +- numba/cpython/unicode.py | 2 +- numba/cuda/cuda_paths.py | 2 +- numba/cuda/cudadrv/devices.py | 2 +- numba/cuda/intrinsic_wrapper.py | 2 +- numba/cuda/tests/cudapy/test_gufunc_scalar.py | 2 +- numba/experimental/jitclass/_box.c | 2 +- numba/experimental/jitclass/base.py | 2 +- numba/misc/numba_gdbinfo.py | 2 +- numba/misc/numba_sysinfo.py | 2 +- numba/np/arrayobj.py | 12 ++++++------ numba/np/linalg.py | 2 +- 
numba/np/npdatetime_helpers.py | 4 ++-- numba/np/npyfuncs.py | 2 +- numba/np/ufunc/deviceufunc.py | 2 +- numba/np/ufunc/parallel.py | 2 +- numba/np/ufunc/ufuncbuilder.py | 2 +- numba/parfors/array_analysis.py | 6 +++--- numba/parfors/parfor.py | 4 ++-- numba/parfors/parfor_lowering.py | 2 +- numba/tests/npyufunc/test_gufunc.py | 4 ++-- numba/tests/test_array_constants.py | 2 +- numba/tests/test_buffer_protocol.py | 2 +- numba/tests/test_cli.py | 2 +- numba/tests/test_debuginfo.py | 2 +- numba/tests/test_dictimpl.py | 2 +- numba/tests/test_dyn_array.py | 4 ++-- numba/tests/test_ir_inlining.py | 4 ++-- numba/tests/test_numpyadapt.py | 2 +- numba/tests/test_parallel_backend.py | 2 +- numba/tests/test_random.py | 6 +++--- numba/tests/test_sort.py | 4 ++-- numba/tests/test_ssa.py | 2 +- numba/tests/test_svml.py | 6 +++--- numba/tests/test_unicode.py | 2 +- numba/typed/typedlist.py | 8 ++++---- 69 files changed, 101 insertions(+), 102 deletions(-) diff --git a/buildscripts/condarecipe.local/conda_build_config.yaml b/buildscripts/condarecipe.local/conda_build_config.yaml index 81b7d08c3d1..9798e4b695f 100644 --- a/buildscripts/condarecipe.local/conda_build_config.yaml +++ b/buildscripts/condarecipe.local/conda_build_config.yaml @@ -1,4 +1,4 @@ -# Numba/llvmlite stack needs an older compiler for backwards compatability. +# Numba/llvmlite stack needs an older compiler for backwards compatibility. 
c_compiler_version: # [linux] - 7 # [linux and (x86_64 or ppc64le)] - 9 # [linux and aarch64] diff --git a/docs/dagmap/jquery.graphviz.svg.js b/docs/dagmap/jquery.graphviz.svg.js index 5e6b74027a4..15bea27bd0d 100644 --- a/docs/dagmap/jquery.graphviz.svg.js +++ b/docs/dagmap/jquery.graphviz.svg.js @@ -63,7 +63,7 @@ animation: false, viewport: null }).on('hide.bs.tooltip', function() { - // keep them visible even if you acidentally mouse over + // keep them visible even if you accidentally mouse over if ($a.attr('data-tooltip-keepvisible')) { return false } diff --git a/docs/source/_ext/ghfiles.py b/docs/source/_ext/ghfiles.py index 2907ef4fdeb..d0320cb3aba 100644 --- a/docs/source/_ext/ghfiles.py +++ b/docs/source/_ext/ghfiles.py @@ -61,7 +61,7 @@ def ghfile_role(name, rawtext, text, lineno, inliner, options={}, content=[]): node = nodes.reference(rawtext, text, refuri=make_ref(text), **options) my_nodes.append(node) - # insert seperators if needed + # insert separators if needed if len(my_nodes) > 1: my_nodes = intersperse(my_nodes, nodes.Text(" | ")) return my_nodes, [] diff --git a/docs/source/cuda/external-memory.rst b/docs/source/cuda/external-memory.rst index 7564c7970bf..28a8f59f0a9 100644 --- a/docs/source/cuda/external-memory.rst +++ b/docs/source/cuda/external-memory.rst @@ -117,7 +117,7 @@ implementation follows: is handled internally within Numba. - It is optional to provide memory info from the ``get_memory_info`` method, which provides a count of the total and free memory on the device for the context. - It is preferrable to implement the method, but this may not be practical for + It is preferable to implement the method, but this may not be practical for all allocators. If memory info is not provided, this method should raise a :class:`RuntimeError`. 
- The ``defer_cleanup`` method should return a context manager that ensures that diff --git a/docs/source/developer/contributing.rst b/docs/source/developer/contributing.rst index 0648db2376d..9ec5f9d9ffc 100644 --- a/docs/source/developer/contributing.rst +++ b/docs/source/developer/contributing.rst @@ -289,7 +289,7 @@ circumstances should ``type: ignore`` comments be used. If you are contributing a new feature, we encourage you to use type hints, even if the file is not currently in the checklist. If you want to contribute type hints to enable a new file to be in the checklist, please add the file to the -``files`` variable in ``mypy.ini``, and decide what level of compliance you are targetting. Level 3 is basic static +``files`` variable in ``mypy.ini``, and decide what level of compliance you are targeting. Level 3 is basic static checks, while levels 2 and 1 represent stricter checking. The levels are described in details in ``mypy.ini``. There is potential for confusion between the Numba module ``typing`` and Python built-in module ``typing`` used for type diff --git a/docs/source/developer/hashing.rst b/docs/source/developer/hashing.rst index 3de12ffd353..b955324413f 100644 --- a/docs/source/developer/hashing.rst +++ b/docs/source/developer/hashing.rst @@ -48,7 +48,7 @@ The accommodation of ``PYTHONHASHSEED`` --------------------------------------- The ``PYTHONHASHSEED`` environment variable can be used to seed the CPython -hashing algorithms for e.g. the purposes of reproduciblity. The Numba hashing +hashing algorithms for e.g. the purposes of reproducibility. The Numba hashing implementation directly reads the CPython hashing algorithms' internal state and as a result the influence of ``PYTHONHASHSEED`` is replicated in Numba's hashing implementations. 
diff --git a/docs/source/developer/literal.rst b/docs/source/developer/literal.rst index 7cda8718dc5..dd6d8d18723 100644 --- a/docs/source/developer/literal.rst +++ b/docs/source/developer/literal.rst @@ -8,7 +8,7 @@ Notes on Literal Types some limitations of the compilation mechanism relating to types. Some features need to specialize based on the literal value during -compliation to produce type stable code necessary for successful compilation in +compilation to produce type stable code necessary for successful compilation in Numba. This can be achieved by propagating the literal value through the type system. Numba recognizes inline literal values as :class:`numba.types.Literal`. For example:: diff --git a/docs/source/developer/repomap.rst b/docs/source/developer/repomap.rst index 3e49ea9e648..12bfd1b166f 100644 --- a/docs/source/developer/repomap.rst +++ b/docs/source/developer/repomap.rst @@ -542,9 +542,9 @@ Note that the CUDA target does reuse some parts of the CPU target. - :ghfile:`numba/cuda/compiler.py` - Compiler pipeline for CUDA target - :ghfile:`numba/cuda/intrinsic_wrapper.py` - CUDA device intrinsics (shuffle, ballot, etc) -- :ghfile:`numba/cuda/initialize.py` - Defered initialization of the CUDA +- :ghfile:`numba/cuda/initialize.py` - Deferred initialization of the CUDA device and subsystem. 
Called only when user imports ``numba.cuda`` -- :ghfile:`numba/cuda/simulator_init.py` - Initalizes the CUDA simulator +- :ghfile:`numba/cuda/simulator_init.py` - Initializes the CUDA simulator subsystem (only when user requests it with env var) - :ghfile:`numba/cuda/random.py` - Implementation of random number generator - :ghfile:`numba/cuda/api.py` - User facing APIs imported into ``numba.cuda.*`` diff --git a/docs/source/proposals/jit-classes.rst index 9ddf67abd13..a321421f6a5 100644 --- a/docs/source/proposals/jit-classes.rst +++ b/docs/source/proposals/jit-classes.rst @@ -38,8 +38,8 @@ With these operations, a class object (not the instance) does not need to be materialize. Using the class object as a constructor is fully resolved (a runtime implementation is picked) during the typing phase in the compiler. This means **a class object will not be first class**. On the other hand, -implementating a first-class class object will require an -"interface" type, or the type of class. +implementing a first-class class object will require an "interface" type, +or the type of class. The instantiation of a class will allocate resources for storing the data attributes. This is described in the "Storage model" section. Methods are @@ -69,7 +69,7 @@ C structure:: complex64 field2; }; -This will also be comptabile with an aligned NumPy structured dtype. +This will also be compatible with an aligned NumPy structured dtype. Methods diff --git a/docs/source/proposals/typing_recursion.rst index 2b671903e87..a33a3a4b5a6 100644 --- a/docs/source/proposals/typing_recursion.rst +++ b/docs/source/proposals/typing_recursion.rst @@ -13,7 +13,7 @@ This document proposes an enhancement to the type inference algorithm to support recursion without explicitly annotating the function signature.
As a result, the proposal enables numba to type-infer both self-recursive and mutual-recursive functions under some limitations. In practice, these -limitions can be easily overcome by specifying a compilation order. +limitations can be easily overcome by specifying a compilation order. The Current State @@ -38,7 +38,7 @@ which in turns call ``foo()``:: return foo(x - 1) -The type inferrence process of ``foo()`` depends on that of ``bar()``, +The type inference process of ``foo()`` depends on that of ``bar()``, which depends on ``foo()``. Therefore ``foo()`` depends on itself and the type inference algorithm cannot terminate. diff --git a/docs/source/reference/envvars.rst b/docs/source/reference/envvars.rst index 1ea00117867..ec28ed816e8 100644 --- a/docs/source/reference/envvars.rst +++ b/docs/source/reference/envvars.rst @@ -86,7 +86,7 @@ These variables influence what is printed out during compilation of - ``"old_style"`` (default): this is the exception handling behaviour that is present in Numba versions <= 0.54.x. Numba will capture and wrap all - errors occuring in compilation and depending on the compilation phase they + errors occurring in compilation and depending on the compilation phase they will likely materialize as part of the message in a ``TypingError`` or a ``LoweringError``. - ``"new_style"`` this will treat any exception that does not inherit from diff --git a/docs/source/user/faq.rst b/docs/source/user/faq.rst index c598259b948..ac76874f54c 100644 --- a/docs/source/user/faq.rst +++ b/docs/source/user/faq.rst @@ -281,7 +281,7 @@ A more radical alternative is :ref:`ahead-of-time compilation `. GPU Programming =============== -How do I work around the ``CUDA intialized before forking`` error? +How do I work around the ``CUDA initialized before forking`` error? 
------------------------------------------------------------------ On Linux, the ``multiprocessing`` module in the Python standard library @@ -298,7 +298,7 @@ available GPUs before starting the process pool. In Python 3, you can change the process start method, as described in the `multiprocessing documentation `_. Switching from ``fork`` to ``spawn`` or ``forkserver`` will avoid the CUDA -initalization issue, although the child processes will not inherit any global +initialization issue, although the child processes will not inherit any global variables from their parent. diff --git a/docs/source/user/troubleshoot.rst b/docs/source/user/troubleshoot.rst index b8ba85d8664..b921443460b 100644 --- a/docs/source/user/troubleshoot.rst +++ b/docs/source/user/troubleshoot.rst @@ -371,7 +371,7 @@ In order to debug code, it is possible to disable JIT compilation, which makes the ``jit`` decorator (and the ``njit`` decorator) act as if they perform no operation, and the invocation of decorated functions calls the original Python function instead of a compiled version. This can be toggled by -setting the :envvar:`NUMBA_DISABLE_JIT` enviroment variable to ``1``. +setting the :envvar:`NUMBA_DISABLE_JIT` environment variable to ``1``. When this mode is enabled, the ``vectorize`` and ``guvectorize`` decorators will still result in compilation of a ufunc, as there is no straightforward pure diff --git a/mypy.ini b/mypy.ini index 0b2ffdfa3e0..0b790befd0e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -34,7 +34,7 @@ files = **/numba/core/types/*.py, **/numba/core/datamodel/*.py, **/numba/core/re ;follow_imports = normal # Level 3 - modules that pass mypy default settings (only those in `files` global setting and not in previous levels) -# Function/variables are annotated to avoid mypy erros, but annotations are not complete. +# Function/variables are annotated to avoid mypy errors, but annotations are not complete. 
[mypy-numba.core.*] warn_return_any = True diff --git a/numba/cext/listobject.c b/numba/cext/listobject.c index 972921e4e96..9de03f5ff1d 100644 --- a/numba/cext/listobject.c +++ b/numba/cext/listobject.c @@ -509,7 +509,7 @@ numba_list_delete_slice(NB_List *lp, lp->methods.item_decref(loc); } } - // memove items into place + // memmove items into place leftover_bytes = (lp->size - stop) * lp->item_size; loc = lp->items + lp->item_size * start; new_loc = lp->items + lp->item_size * stop; @@ -538,7 +538,7 @@ numba_list_delete_slice(NB_List *lp, // decref item being removed loc = lp->items + lp->item_size * cur; list_decref_item(lp, loc); - /* memmove the aforementiond step-1 (or less) items + /* memmove the aforementioned step-1 (or less) items * dst : index of deleted item minus total deleted sofar * src : index of deleted item plus one (next item) */ diff --git a/numba/cloudpickle/__init__.py b/numba/cloudpickle/__init__.py index e40ba3ec1e3..231a09795af 100644 --- a/numba/cloudpickle/__init__.py +++ b/numba/cloudpickle/__init__.py @@ -1,6 +1,6 @@ from __future__ import absolute_import -# NOTE: The following imports are adapted to use as a vendored subpackge. +# NOTE: The following imports are adapted to use as a vendored subpackage. 
# from https://github.com/cloudpipe/cloudpickle/blob/d3279a0689b769d5315fc6ff00cd0f5897844526/cloudpickle/init.py from .cloudpickle import * # noqa from .cloudpickle_fast import CloudPickler, dumps, dump # noqa diff --git a/numba/cloudpickle/cloudpickle.py b/numba/cloudpickle/cloudpickle.py index f9cd4486a27..f7fd4dcbf4f 100644 --- a/numba/cloudpickle/cloudpickle.py +++ b/numba/cloudpickle/cloudpickle.py @@ -88,7 +88,7 @@ def g(): DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL # Track the provenance of reconstructed dynamic classes to make it possible to -# recontruct instances from the matching singleton class definition when +# reconstruct instances from the matching singleton class definition when # appropriate and preserve the usual "isinstance" semantics of Python objects. _DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary() _DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary() @@ -242,7 +242,7 @@ def _extract_code_globals(co): out_names = {names[oparg] for _, oparg in _walk_global_ops(co)} # Declaring a function inside another one using the "def ..." - # syntax generates a constant code object corresonding to the one + # syntax generates a constant code object corresponding to one # of the nested function's As the nested function may itself need # global variables, we need to introspect its code, extract its # globals, (look for code object in it's co_consts attribute..) 
and @@ -486,7 +486,7 @@ def _create_parametrized_type_hint(origin, args): def parametrized_type_hint_getinitargs(obj): - # The distorted type check sematic for typing construct becomes: + # The distorted type check semantic for typing construct becomes: # ``type(obj) is type(TypeHint)``, which means "obj is a # parametrized TypeHint" if type(obj) is type(Literal): # pragma: no branch diff --git a/numba/core/boxing.py b/numba/core/boxing.py index bcde077ef1e..011d3a87b36 100644 --- a/numba/core/boxing.py +++ b/numba/core/boxing.py @@ -1230,7 +1230,7 @@ def object_getattr_safely(obj, attr): with early_exit_if_null(c.builder, stack, ct_voidptr_ty): handle_failure() - # This wires in the fnptrs refered to by name + # This wires in the fnptrs referred to by name def wire_in_fnptrs(name): # Find the CFunctionType function interface_next_fn = c.pyapi.object_getattr_string( diff --git a/numba/core/cgutils.py b/numba/core/cgutils.py index f237507f565..1a8bef63166 100644 --- a/numba/core/cgutils.py +++ b/numba/core/cgutils.py @@ -361,7 +361,7 @@ def _setvalue(self, value): def alloca_once(builder, ty, size=None, name='', zfill=False): """Allocate stack memory at the entry block of the current function - pointed by ``builder`` withe llvm type ``ty``. The optional ``size`` arg + pointed by ``builder`` with llvm type ``ty``. The optional ``size`` arg set the number of element to allocate. The default is 1. The optional ``name`` arg set the symbol name inside the llvm IR for debugging. 
If ``zfill`` is set, fill the memory with zeros at the current diff --git a/numba/core/codegen.py b/numba/core/codegen.py index c2ba978bafd..e988fab1263 100644 --- a/numba/core/codegen.py +++ b/numba/core/codegen.py @@ -202,7 +202,7 @@ def init_digraph(name, fname, fontsize): # when trying to render to pdf cmax = 200 if len(fname) > cmax: - wstr = (f'CFG output filname "{fname}" exceeds maximum ' + wstr = (f'CFG output filename "{fname}" exceeds maximum ' f'supported length, it will be truncated.') warnings.warn(wstr, NumbaInvalidConfigWarning) fname = fname[:cmax] diff --git a/numba/core/controlflow.py b/numba/core/controlflow.py index 77e284bbbc5..25a9790d6ac 100644 --- a/numba/core/controlflow.py +++ b/numba/core/controlflow.py @@ -181,7 +181,7 @@ def dominance_frontier(self): The dominance frontier _df(N) is the set of all nodes that are immediate successors to blocks dominanted by N but which aren't - stricly dominanted by N + strictly dominanted by N """ return self._df diff --git a/numba/core/datamodel/packer.py b/numba/core/datamodel/packer.py index 512637c8b6b..9efc51449bc 100644 --- a/numba/core/datamodel/packer.py +++ b/numba/core/datamodel/packer.py @@ -64,7 +64,7 @@ class ArgPacker(object): It maintains a position map for unflattening the arguments. Since struct (esp. nested struct) have specific ABI requirements (e.g. - alignemnt, pointer address-space, ...) in different architecture (e.g. + alignment, pointer address-space, ...) in different architecture (e.g. OpenCL, CUDA), flattening composite argument types simplifes the call setup from the Python side. Functions are receiving simple primitive types and there are only a handful of these. 
diff --git a/numba/core/errors.py index 25725af9847..badb44b9f4f 100644 --- a/numba/core/errors.py +++ b/numba/core/errors.py @@ -614,7 +614,7 @@ def __init__(self, msg, loc=None): class UnsupportedParforsError(NumbaError): """ - An error ocurred because parfors is not supported on the platform. + An error occurred because parfors is not supported on the platform. """ pass @@ -673,7 +673,7 @@ def __init__(self, value, loc=None): class InternalError(NumbaError): """ - For wrapping internal error occured within the compiler + For wrapping internal error occurred within the compiler """ def __init__(self, exception): diff --git a/numba/core/externals.py index 19063c76216..e181b5f43d9 100644 --- a/numba/core/externals.py +++ b/numba/core/externals.py @@ -51,7 +51,7 @@ def compile_multi3(context): bb = fn.append_basic_block() builder = ir.IRBuilder(bb) - # This implementation mimicks compiler-rt's. + # This implementation mimics compiler-rt's. al = builder.trunc(a, i64) bl = builder.trunc(b, i64) ah = builder.trunc(builder.ashr(a, _64), i64) diff --git a/numba/core/inline_closurecall.py index 25773c072f1..badb6b49d7a 100644 --- a/numba/core/inline_closurecall.py +++ b/numba/core/inline_closurecall.py @@ -1400,7 +1400,7 @@ def inline_array(array_var, expr, stmts, list_vars, dels): ir.Expr.build_tuple(items=[size_var], loc=loc), loc)) # The general approach is to create an empty array and then fill - # the elements in one-by-one from their specificiation. + # the elements in one-by-one from their specification. # Get the numpy type to pass to empty.
nptype = types.DType(dtype) diff --git a/numba/core/interpreter.py b/numba/core/interpreter.py index 7c036cde6db..71ea49831b3 100644 --- a/numba/core/interpreter.py +++ b/numba/core/interpreter.py @@ -374,11 +374,11 @@ def _call_function_ex_replace_args_large( ) search_end -= 1 else: - # There must always be an initial assignement + # There must always be an initial assignment # https://github.com/numba/numba/blob/59fa2e335be68148b3bd72a29de3ff011430038d/numba/core/interpreter.py#L259-L260 # If this changes we may need to support this branch. raise AssertionError("unreachable") - # Traverse backwards to find all concatentations + # Traverse backwards to find all concatenations # until eventually reaching the original empty tuple. while search_end >= search_start: concat_stmt = old_body[search_end] @@ -415,7 +415,7 @@ def _call_function_ex_replace_args_large( raise UnsupportedError(errmsg) lhs_name = concat_stmt.value.lhs.name rhs_name = concat_stmt.value.rhs.name - # The previous statment should be a + # The previous statement should be a # build_tuple containing the arg. arg_tuple_stmt = old_body[search_end - 1] if not ( diff --git a/numba/core/ir_utils.py b/numba/core/ir_utils.py index 1ca1d412b5b..ad709042354 100644 --- a/numba/core/ir_utils.py +++ b/numba/core/ir_utils.py @@ -1920,7 +1920,7 @@ def is_namedtuple_class(c): def fill_block_with_call(newblock, callee, label_next, inputs, outputs): """Fill *newblock* to call *callee* with arguments listed in *inputs*. - The returned values are unwraped into variables in *outputs*. + The returned values are unwrapped into variables in *outputs*. The block would then jump to *label_next*. 
""" scope = newblock.scope diff --git a/numba/core/removerefctpass.py b/numba/core/removerefctpass.py index e22343aa151..98c04869a0b 100644 --- a/numba/core/removerefctpass.py +++ b/numba/core/removerefctpass.py @@ -107,8 +107,7 @@ def remove_unnecessary_nrt_usage(function, context, fndesc): - call function(s) that return refcounted object. In effect, the function will not capture or create references that extend - the lifetime of any refcounted objects beyound the lifetime of the - function. + the lifetime of any refcounted objects beyond the lifetime of the function. The rewrite is performed in place. If rewrite has happened, this function returns True, otherwise, it returns False. diff --git a/numba/core/runtime/nrtopt.py b/numba/core/runtime/nrtopt.py index 8fd7bb4d6a2..2a6f56b0910 100644 --- a/numba/core/runtime/nrtopt.py +++ b/numba/core/runtime/nrtopt.py @@ -10,9 +10,9 @@ _regex_decref = re.compile(r'\s*(?:tail)?\s*call void @NRT_decref\((.*)\)') _regex_bb = re.compile( r'|'.join([ - # unamed BB is just a plain number + # unnamed BB is just a plain number r'[0-9]+:', - # with a proper identifer (see llvm langref) + # with a proper identifier (see llvm langref) r'[\'"]?[-a-zA-Z$._0-9][-a-zA-Z$._0-9]*[\'"]?:', # is a start of a function definition r'^define', diff --git a/numba/core/targetconfig.py b/numba/core/targetconfig.py index c69b98f8c28..1ef986fed13 100644 --- a/numba/core/targetconfig.py +++ b/numba/core/targetconfig.py @@ -146,7 +146,7 @@ class TargetConfig(metaclass=_MetaTargetConfig): >>> a_bool_option = Option(type=bool, default=False, doc="a bool") >>> an_int_option = Option(type=int, default=0, doc="an int") - The metaclass will insert properties for each ``Option``. For exapmle: + The metaclass will insert properties for each ``Option``. 
For example: >>> tc = MyTargetConfig() >>> tc.a_bool_option = True # invokes the setter diff --git a/numba/core/typing/cffi_utils.py b/numba/core/typing/cffi_utils.py index 022e7642daf..ee30b81a124 100644 --- a/numba/core/typing/cffi_utils.py +++ b/numba/core/typing/cffi_utils.py @@ -146,7 +146,7 @@ def map_struct_to_record_dtype(cffi_type): } is_aligned = True for k, v in cffi_type.fields: - # guard unsupport values + # guard unsupported values if v.bitshift != -1: msg = "field {!r} has bitshift, this is not supported" raise ValueError(msg.format(k)) diff --git a/numba/core/typing/npydecl.py b/numba/core/typing/npydecl.py index 82b631171a4..92060ffe0c8 100644 --- a/numba/core/typing/npydecl.py +++ b/numba/core/typing/npydecl.py @@ -491,7 +491,7 @@ def _parse_nested_sequence(context, typ): return n + 1, dtype elif isinstance(typ, (types.BaseTuple,)): if typ.count == 0: - # Mimick Numpy's behaviour + # Mimic Numpy's behaviour return 1, types.float64 n, dtype = _parse_nested_sequence(context, typ[0]) dtypes = [dtype] diff --git a/numba/core/typing/templates.py b/numba/core/typing/templates.py index 62485a3e635..caf88fe17b9 100644 --- a/numba/core/typing/templates.py +++ b/numba/core/typing/templates.py @@ -673,7 +673,7 @@ def generic(self, args, kws): # needs to exist for type resolution # NOTE: If lowering is failing on a `_EmptyImplementationEntry`, - # the inliner has failed to inline this entry corretly. + # the inliner has failed to inline this entry correctly. impl_init = _EmptyImplementationEntry('always inlined') self._compiled_overloads[sig.args] = impl_init if not self._inline.is_always_inline: diff --git a/numba/core/utils.py b/numba/core/utils.py index 06e97ff3fbc..2e252818594 100644 --- a/numba/core/utils.py +++ b/numba/core/utils.py @@ -175,7 +175,7 @@ def shutting_down(globals=globals): # which atexit is True. Some of these finalizers may call shutting_down() to # check whether the interpreter is shutting down. 
For this to behave correctly, # we need to make sure that _at_shutdown is called before the finalizer exit -# function. Since atexit operates as a LIFO stack, we first contruct a dummy +# function. Since atexit operates as a LIFO stack, we first construct a dummy # finalizer then register atexit to ensure this ordering. weakref.finalize(lambda: None, lambda: None) atexit.register(_at_shutdown) diff --git a/numba/cpython/unicode.py b/numba/cpython/unicode.py index d7b2674d836..a14f3191061 100644 --- a/numba/cpython/unicode.py +++ b/numba/cpython/unicode.py @@ -1790,7 +1790,7 @@ def unicode_replace(s, old_str, new_str, count=-1): thety = count.type if not isinstance(thety, (int, types.Integer)): - raise TypingError('Unsupported parameters. The parametrs ' + raise TypingError('Unsupported parameters. The parameters ' 'must be Integer. Given count: {}'.format(count)) if not isinstance(old_str, (types.UnicodeType, types.NoneType)): diff --git a/numba/cuda/cuda_paths.py b/numba/cuda/cuda_paths.py index 3a49d52070f..0da435d331a 100644 --- a/numba/cuda/cuda_paths.py +++ b/numba/cuda/cuda_paths.py @@ -100,7 +100,7 @@ def get_conda_ctk(): is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta')) if not is_conda_env: return - # Asssume the existence of NVVM to imply cudatoolkit installed + # Assume the existence of NVVM to imply cudatoolkit installed paths = find_lib('nvvm') if not paths: return diff --git a/numba/cuda/cudadrv/devices.py b/numba/cuda/cudadrv/devices.py index 0f6c4037f7a..6cc9e2e393f 100644 --- a/numba/cuda/cudadrv/devices.py +++ b/numba/cuda/cudadrv/devices.py @@ -4,7 +4,7 @@ This module implements a API that is like the "CUDA runtime" context manager for managing CUDA context stack and clean up. It relies on thread-local globals to separate the context stack management of each thread. Contexts are also -sharable among threads. Only the main thread can destroy Contexts. +shareable among threads. Only the main thread can destroy Contexts. 
Note: - This module must be imported by the main-thread. diff --git a/numba/cuda/intrinsic_wrapper.py b/numba/cuda/intrinsic_wrapper.py index cd3d563fe95..e02639f2122 100644 --- a/numba/cuda/intrinsic_wrapper.py +++ b/numba/cuda/intrinsic_wrapper.py @@ -32,7 +32,7 @@ def eq_sync(mask, predicate): @jit(device=True) def ballot_sync(mask, predicate): """ - Returns a mask of all threads in the warp whoose predicate is true, + Returns a mask of all threads in the warp whose predicate is true, and are within the given mask. """ return numba.cuda.vote_sync_intrinsic(mask, 3, predicate)[0] diff --git a/numba/cuda/tests/cudapy/test_gufunc_scalar.py b/numba/cuda/tests/cudapy/test_gufunc_scalar.py index 2b3647511c4..493a9ceec5e 100644 --- a/numba/cuda/tests/cudapy/test_gufunc_scalar.py +++ b/numba/cuda/tests/cudapy/test_gufunc_scalar.py @@ -29,7 +29,7 @@ def sum_row(inp, out): # inp is (10000, 3) # out is (10000) - # The outter (leftmost) dimension must match or numpy broadcasting + # The outer (leftmost) dimension must match or numpy broadcasting # is performed. But, broadcasting on CUDA arrays is not supported. inp = np.arange(300, dtype=np.int32).reshape(100, 3) diff --git a/numba/experimental/jitclass/_box.c b/numba/experimental/jitclass/_box.c index bf49de4fca8..df9473ff34e 100644 --- a/numba/experimental/jitclass/_box.c +++ b/numba/experimental/jitclass/_box.c @@ -11,7 +11,7 @@ typedef struct { /* Store function defined in numba.core.runtime._nrt_python for use in box_dealloc. * It points to a function is code segment that does not need user deallocation - * and does not disappear while the process is stil running. + * and does not disappear while the process is still running. 
*/ static void (*MemInfo_release)(void*) = NULL; diff --git a/numba/experimental/jitclass/base.py b/numba/experimental/jitclass/base.py index d61dc60e3f4..105c66bfd97 100644 --- a/numba/experimental/jitclass/base.py +++ b/numba/experimental/jitclass/base.py @@ -332,7 +332,7 @@ def _register_methods(self, registry, instance_type): Register method implementations. This simply registers that the method names are valid methods. Inside of imp() below we retrieve the actual method to run from the type of - the reciever argument (i.e. self). + the receiver argument (i.e. self). """ to_register = list(instance_type.jit_methods) + \ list(instance_type.jit_static_methods) diff --git a/numba/misc/numba_gdbinfo.py b/numba/misc/numba_gdbinfo.py index f5e9f1ddc51..db92cd24b6c 100644 --- a/numba/misc/numba_gdbinfo.py +++ b/numba/misc/numba_gdbinfo.py @@ -131,7 +131,7 @@ def collect_gdbinfo(): def display_gdbinfo(sep_pos=45): - """Displays the infomation collected by collect_gdbinfo. + """Displays the information collected by collect_gdbinfo. """ gdb_info = collect_gdbinfo() print('-' * 80) diff --git a/numba/misc/numba_sysinfo.py b/numba/misc/numba_sysinfo.py index d7396d5ed2f..79860fac624 100644 --- a/numba/misc/numba_sysinfo.py +++ b/numba/misc/numba_sysinfo.py @@ -336,7 +336,7 @@ def get_sysinfo(): msg_not_found = "CUDA driver library cannot be found" msg_disabled_by_user = "CUDA is disabled" msg_end = " or no CUDA enabled devices are present." - msg_generic_problem = "CUDA device intialisation problem." + msg_generic_problem = "CUDA device initialisation problem." 
msg = getattr(e, 'msg', None) if msg is not None: if msg_not_found in msg: diff --git a/numba/np/arrayobj.py b/numba/np/arrayobj.py index e445afb03f2..fbcc5e48603 100644 --- a/numba/np/arrayobj.py +++ b/numba/np/arrayobj.py @@ -1098,7 +1098,7 @@ def offset_bounds_from_strides(context, builder, arrty, arr, shapes, strides): Compute a half-open range [lower, upper) of byte offsets from the array's data pointer, that bound the in-memory extent of the array. - This mimicks offset_bounds_from_strides() from + This mimics offset_bounds_from_strides() from numpy/core/src/private/mem_overlap.c """ itemsize = arr.itemsize @@ -1617,7 +1617,7 @@ def src_cleanup(): source_indices = tuple(c for c in counts if c is not None) val = src_getitem(source_indices) - # Cast to the destination dtype (cross-dtype slice assignement is allowed) + # Cast to the destination dtype (cross-dtype slice assignment is allowed) val = context.cast(builder, val, src_dtype, aryty.dtype) # No need to check for wraparound, as the indexers all ensure @@ -2588,7 +2588,7 @@ def array_flags(context, builder, typ, value): @lower_getattr(types.ArrayFlags, "c_contiguous") def array_flags_c_contiguous(context, builder, typ, value): if typ.array_type.layout != 'C': - # any layout can stil be contiguous + # any layout can still be contiguous flagsobj = context.make_helper(builder, typ, value=value) res = _call_contiguous_check(is_contiguous, context, builder, typ.array_type, flagsobj.parent) @@ -2601,7 +2601,7 @@ def array_flags_c_contiguous(context, builder, typ, value): @lower_getattr(types.ArrayFlags, "f_contiguous") def array_flags_f_contiguous(context, builder, typ, value): if typ.array_type.layout != 'F': - # any layout can stil be contiguous + # any layout can still be contiguous flagsobj = context.make_helper(builder, typ, value=value) res = _call_contiguous_check(is_fortran, context, builder, typ.array_type, flagsobj.parent) @@ -2982,7 +2982,7 @@ def _increment_indices(context, builder, ndim, shape, 
indices, end_flag=None,
         count = shape[dim]
         in_bounds = builder.icmp_signed('<', idx, count)
         with cgutils.if_likely(builder, in_bounds):
-            # New index is stil in bounds
+            # New index is still in bounds
             builder.store(idx, idxptr)
             if loop_continue is not None:
                 loop_continue(dim)
@@ -4541,7 +4541,7 @@ def _as_layout_array(context, builder, sig, args, output_layout):
 
     else:
         if aryty.layout == 'A':
-            # There's stil chance the array is in contiguous layout,
+            # There's still chance the array is in contiguous layout,
             # just that we don't know at compile time.
             # We can do a runtime check.
 
diff --git a/numba/np/linalg.py b/numba/np/linalg.py
index 3b224b83a4c..0f36bf28a13 100644
--- a/numba/np/linalg.py
+++ b/numba/np/linalg.py
@@ -2648,7 +2648,7 @@ def _kron_normaliser_impl(x):
     if isinstance(x, types.Array):
         if x.layout not in ('C', 'F'):
             raise TypingError("np.linalg.kron only supports 'C' or 'F' layout "
-                              "input arrays. Receieved an input of "
+                              "input arrays. Received an input of "
                               "layout '{}'.".format(x.layout))
         elif x.ndim == 2:
             @register_jitable
diff --git a/numba/np/npdatetime_helpers.py b/numba/np/npdatetime_helpers.py
index 0828ade3e76..9bb9b8523bd 100644
--- a/numba/np/npdatetime_helpers.py
+++ b/numba/np/npdatetime_helpers.py
@@ -45,7 +45,7 @@ def same_kind(src, dest):
 
 
 def can_cast_timedelta_units(src, dest):
-    # Mimick numpy's "safe" casting and promotion
+    # Mimic numpy's "safe" casting and promotion
     # `dest` must be more precise than `src` and they must be compatible
     # for conversion.
     # XXX should we switch to enforcing "same-kind" for Numpy 1.10+ ?
@@ -89,7 +89,7 @@ def _get_conversion_multiplier(big_unit_code, small_unit_code):
     None is returned if the conversion is not possible through a simple
     integer multiplication.
     """
-    # Mimicks get_datetime_units_factor() in numpy's datetime.c,
+    # Mimics get_datetime_units_factor() in numpy's datetime.c,
     # with a twist to allow no-op conversion from generic units.
if big_unit_code == 14: return 1 diff --git a/numba/np/npyfuncs.py b/numba/np/npyfuncs.py index 39fa309c0fb..c475532f9ac 100644 --- a/numba/np/npyfuncs.py +++ b/numba/np/npyfuncs.py @@ -818,7 +818,7 @@ def np_real_reciprocal_impl(context, builder, sig, args): def np_complex_reciprocal_impl(context, builder, sig, args): # based on the implementation in loops.c.src # Basically the same Smith method used for division, but with - # the numerator substitued by 1.0 + # the numerator substituted by 1.0 _check_arity_and_homogeneity(sig, args, 1) ty = sig.args[0] diff --git a/numba/np/ufunc/deviceufunc.py b/numba/np/ufunc/deviceufunc.py index 96c726a5333..f67f28826d2 100644 --- a/numba/np/ufunc/deviceufunc.py +++ b/numba/np/ufunc/deviceufunc.py @@ -666,7 +666,7 @@ def _schedule(self, inputs, out): try: outdtype, kernel = self.kernelmap[idtypes] except KeyError: - # No exact match, then use the first compatbile. + # No exact match, then use the first compatible. # This does not match the numpy dispatching exactly. # Later, we may just jit a new version for the missing signature. 
idtypes = self._search_matching_signature(idtypes) diff --git a/numba/np/ufunc/parallel.py b/numba/np/ufunc/parallel.py index d6b72c5f47c..e75903c7143 100644 --- a/numba/np/ufunc/parallel.py +++ b/numba/np/ufunc/parallel.py @@ -182,7 +182,7 @@ class ParallelUFuncBuilder(ufuncbuilder.UFuncBuilder): def build(self, cres, sig): _launch_threads() - # Buider wrapper for ufunc entry point + # Builder wrapper for ufunc entry point ctx = cres.target_context signature = cres.signature library = cres.library diff --git a/numba/np/ufunc/ufuncbuilder.py b/numba/np/ufunc/ufuncbuilder.py index 1b83c3bd703..a2847883a65 100644 --- a/numba/np/ufunc/ufuncbuilder.py +++ b/numba/np/ufunc/ufuncbuilder.py @@ -374,7 +374,7 @@ def build(self, cres): """ Returns (dtype numbers, function ptr, EnvironmentObject) """ - # Buider wrapper for ufunc entry point + # Builder wrapper for ufunc entry point signature = cres.signature info = build_gufunc_wrapper( self.py_func, cres, self.sin, self.sout, diff --git a/numba/parfors/array_analysis.py b/numba/parfors/array_analysis.py index 632cc88e2a7..18a8aa115da 100644 --- a/numba/parfors/array_analysis.py +++ b/numba/parfors/array_analysis.py @@ -384,7 +384,7 @@ def __init__( self.typemap = typemap # defs maps variable name to an int, where # 1 means the variable is defined only once, and numbers greater - # than 1 means defined more than onces. + # than 1 means defined more than once. self.defs = defs if defs else {} # ind_to_var maps index number to a list of variables (of ir.Var type). # It is used to retrieve defined shape variables given an equivalence @@ -768,7 +768,7 @@ def __init__( # means A is defined as: A = B + i, where A,B are variable names, # and i is an integer constants. 
self.def_by = def_by if def_by else {} - # A "refered-by" table that maps A to a list of [(B, i), (C, j) ...], + # A "referred-by" table that maps A to a list of [(B, i), (C, j) ...], # which implies a sequence of definitions: B = A - i, C = A - j, and # so on, where A,B,C,... are variable names, and i,j,... are # integer constants. @@ -1788,7 +1788,7 @@ def slice_size(self, index, dsize, equiv_set, scope, stmts): The computation takes care of negative values used in the slice with respect to the given dimensional size ("dsize"). - Extra statments required to produce the result are appended + Extra statements required to produce the result are appended to parent function's stmts list. """ loc = index.loc diff --git a/numba/parfors/parfor.py b/numba/parfors/parfor.py index 5105486646f..ba6dae93603 100644 --- a/numba/parfors/parfor.py +++ b/numba/parfors/parfor.py @@ -455,7 +455,7 @@ def linspace_3(start, stop, num): } def fill_parallel_impl(return_type, arr, val): - """Parallel implemention of ndarray.fill. The array on + """Parallel implementation of ndarray.fill. The array on which to operate is retrieved from get_call_name and is passed along with the value to fill. """ @@ -2773,7 +2773,7 @@ def unver(name): if isinstance(stmt, Parfor): self._replace_loop_access_indices(stmt.loop_body, index_set, new_index) - # remove added indices for currect recursive parfor handling + # remove added indices for correct recursive parfor handling index_set -= added_indices return diff --git a/numba/parfors/parfor_lowering.py b/numba/parfors/parfor_lowering.py index c6f9acb74e3..1c4a5e8a89c 100644 --- a/numba/parfors/parfor_lowering.py +++ b/numba/parfors/parfor_lowering.py @@ -1245,7 +1245,7 @@ def _create_gufunc_for_parfor_body( gufunc_txt += " = (" + ", ".join([param_dict[x] for x in exp_names]) if len(exp_names) == 1: # Add comma for tuples with singular values. We can't unilaterally - # add a comma alway because (,) isn't valid. 
+ # add a comma always because (,) isn't valid. gufunc_txt += "," gufunc_txt += ")\n" diff --git a/numba/tests/npyufunc/test_gufunc.py b/numba/tests/npyufunc/test_gufunc.py index 0df62b5fdc9..c5c6f9c94cd 100644 --- a/numba/tests/npyufunc/test_gufunc.py +++ b/numba/tests/npyufunc/test_gufunc.py @@ -163,7 +163,7 @@ def sum_row(inp, out): # inp is (10000, 3) # out is (10000) - # The outter (leftmost) dimension must match or numpy broadcasting is performed. + # The outer (leftmost) dimension must match or numpy broadcasting is performed. self.assertTrue(sum_row.is_dynamic) inp = np.arange(30000, dtype=np.int32).reshape(10000, 3) @@ -287,7 +287,7 @@ def sum_row(inp, out): # inp is (10000, 3) # out is (10000) - # The outter (leftmost) dimension must match or numpy broadcasting is performed. + # The outer (leftmost) dimension must match or numpy broadcasting is performed. inp = np.arange(30000, dtype=np.int32).reshape(10000, 3) out = sum_row(inp) diff --git a/numba/tests/test_array_constants.py b/numba/tests/test_array_constants.py index ed1d13d30da..ac5a7e39f13 100644 --- a/numba/tests/test_array_constants.py +++ b/numba/tests/test_array_constants.py @@ -143,7 +143,7 @@ def pyfunc(): def test_too_big_to_freeze(self): """ Test issue https://github.com/numba/numba/issues/2188 where freezing - a constant array into the code thats prohibitively long and consume + a constant array into the code that's prohibitively long and consumes too much RAM. 
""" def test(biggie): diff --git a/numba/tests/test_buffer_protocol.py b/numba/tests/test_buffer_protocol.py index c5ace3f1df7..b680d819e72 100644 --- a/numba/tests/test_buffer_protocol.py +++ b/numba/tests/test_buffer_protocol.py @@ -278,7 +278,7 @@ def test_contiguous(self): self.assertIs(f_contiguous_usecase(m), True) for arr in self._arrays(): m = memoryview(arr) - # Note `arr.flags.contiguous` is wrong (it mimicks c_contiguous) + # Note `arr.flags.contiguous` is wrong (it mimics c_contiguous) self.assertIs(contiguous_usecase(m), arr.flags.f_contiguous or arr.flags.c_contiguous) self.assertIs(c_contiguous_usecase(m), arr.flags.c_contiguous) diff --git a/numba/tests/test_cli.py b/numba/tests/test_cli.py index 49f44c21e79..37b1dcefb65 100644 --- a/numba/tests/test_cli.py +++ b/numba/tests/test_cli.py @@ -272,7 +272,7 @@ def test_nonsense_gdb_binary(self): stdout, stderr = run_cmd(cmdline, env=env) self.assertIn("Testing gdb binary failed", stdout) # NOTE: should 'python' ever add support for the same flags as the gdb - # commands used in the infomation gathering code in `numba_gdbinfo` + # commands used in the information gathering code in `numba_gdbinfo` # this test will fail, it's reasonably unlikely. 
self.assertIn("Unknown option", stdout) diff --git a/numba/tests/test_debuginfo.py b/numba/tests/test_debuginfo.py index 85b4d8c2568..f7250a23356 100644 --- a/numba/tests/test_debuginfo.py +++ b/numba/tests/test_debuginfo.py @@ -648,7 +648,7 @@ def foo(): must_not_have_attrs=set(), ) expected_info[foo_debug_optnone_inline] = dict( - # optnone=True is overriden by forceinline, so this looks like the + # optnone=True is overridden by forceinline, so this looks like the # foo_debug version lines={0, firstline + 5}, must_have_attrs=set([b"alwaysinline"]), diff --git a/numba/tests/test_dictimpl.py b/numba/tests/test_dictimpl.py index b627bba4725..0bcbc542dc6 100644 --- a/numba/tests/test_dictimpl.py +++ b/numba/tests/test_dictimpl.py @@ -647,5 +647,5 @@ def set_parametrized_data(x, y): set_parametrized_data._reset_overloads() set_parametrized_data.targetctx.init() - for ii in range(50): # <- somtimes works a few times + for ii in range(50): # <- sometimes works a few times self.assertIsNone(set_parametrized_data(x, y)) diff --git a/numba/tests/test_dyn_array.py b/numba/tests/test_dyn_array.py index 45d5c2ac656..684e5c5b168 100644 --- a/numba/tests/test_dyn_array.py +++ b/numba/tests/test_dyn_array.py @@ -735,7 +735,7 @@ def test_alloc_size(self): width = types.intp.bitwidth def gen_func(shape, dtype): return lambda : pyfunc(shape, dtype) - # Under these values numba will segfault, but thats another issue + # Under these values numba will segfault, but that's another issue self.check_alloc_size(gen_func(1 << width - 2, np.intp)) self.check_alloc_size(gen_func((1 << width - 8, 64), np.intp)) @@ -860,7 +860,7 @@ def test_alloc_size(self): width = types.intp.bitwidth def gen_func(shape, value): return lambda : np.full(shape, value) - # Under these values numba will segfault, but thats another issue + # Under these values numba will segfault, but that's another issue self.check_alloc_size(gen_func(1 << width - 2, 1)) self.check_alloc_size(gen_func((1 << width - 8, 64), 1)) 
diff --git a/numba/tests/test_ir_inlining.py b/numba/tests/test_ir_inlining.py index 23195c30572..ce5909a732d 100644 --- a/numba/tests/test_ir_inlining.py +++ b/numba/tests/test_ir_inlining.py @@ -707,7 +707,7 @@ def overload_dummy_getitem(obj, idx): if isinstance(obj, DummyType): return dummy_getitem_impl - # noth getitem and static_getitem Exprs refer to opertor.getitem + # both getitem and static_getitem Exprs refer to operator.getitem # hence they are checked using the same expect key self.check(impl, Dummy(), 1, inline_expect={'getitem': False}) self.check(impl_static_getitem, Dummy(), @@ -1505,7 +1505,7 @@ def foo(z): @skip_parfors_unsupported def test_issue7380(self): # This checks that inlining a function containing a loop into another - # loop where the induction variable in boths loops is the same doesn't + # loop where the induction variable in both loops is the same doesn't # end up with a name collision. Parfors can detect this so it is used. # See: https://github.com/numba/numba/issues/7380 diff --git a/numba/tests/test_numpyadapt.py b/numba/tests/test_numpyadapt.py index b43b1a80a90..8df1f028813 100644 --- a/numba/tests/test_numpyadapt.py +++ b/numba/tests/test_numpyadapt.py @@ -7,7 +7,7 @@ class ArrayStruct3D(Structure): - # Mimick the structure defined in numba.targets.arrayobj's make_array() + # Mimic the structure defined in numba.targets.arrayobj's make_array() _fields_ = [ ("meminfo", c_void_p), ("parent", c_void_p), diff --git a/numba/tests/test_parallel_backend.py b/numba/tests/test_parallel_backend.py index 68a759dc7e3..3a1d3583cde 100644 --- a/numba/tests/test_parallel_backend.py +++ b/numba/tests/test_parallel_backend.py @@ -954,7 +954,7 @@ def test_fork_from_non_main_thread(self): # trigger a standard compilation of the function and the thread pools # won't have started yet as the parallelisation compiler passes for # `work` won't yet have run. This mitigates the fork() call from 1. - # occuring after 2. The result of this is that 3. 
can be tested using + # occurring after 2. The result of this is that 3. can be tested using # the threading etc herein with the state being known as the above # described, i.e. the TBB threading layer has not experienced a fork(). diff --git a/numba/tests/test_random.py b/numba/tests/test_random.py index 85930f23e55..59da7ff4b33 100644 --- a/numba/tests/test_random.py +++ b/numba/tests/test_random.py @@ -252,7 +252,7 @@ def _check_random_seed(self, seedfunc, randomfunc): """ Check seed()- and random()-like functions. """ - # Our seed() mimicks Numpy's. + # Our seed() mimics Numpy's. r = np.random.RandomState() for i in [0, 1, 125, 2**32 - 1]: # Need to cast to a C-sized int (for Numpy <= 1.7) @@ -274,7 +274,7 @@ def test_numpy_random(self): self._check_random_seed(numpy_seed, jit_nullary("np.random.rand")) def _check_random_sized(self, seedfunc, randomfunc): - # Our seed() mimicks Numpy's. + # Our seed() mimics Numpy's. r = np.random.RandomState() for i in [0, 1, 125, 2**32 - 1]: # Need to cast to a C-sized int (for Numpy <= 1.7) @@ -1511,7 +1511,7 @@ def check_output(self, out): def check_several_outputs(self, results, same_expected): # Outputs should have the expected statistical properties - # (an unitialized PRNG or a PRNG whose internal state was + # (an uninitialized PRNG or a PRNG whose internal state was # corrupted by a race condition could produce bogus randomness) for out in results: self.check_output(out) diff --git a/numba/tests/test_sort.py b/numba/tests/test_sort.py index ee0c666585d..36788aaf2d9 100644 --- a/numba/tests/test_sort.py +++ b/numba/tests/test_sort.py @@ -402,7 +402,7 @@ def run_merge_at(ms, keys, i): # First check with i == len(stack) - 2 keys = self.array_factory(orig_keys) ms = self.merge_init(keys) - # Push sentinel on stack, to check it was't touched + # Push sentinel on stack, to check it wasn't touched ms = self.timsort.merge_append(ms, stack_sentinel) i = ms.n ms = self.timsort.merge_append(ms, MergeRun(ssa, na)) @@ -413,7 
+413,7 @@ def run_merge_at(ms, keys, i): # Now check with i == len(stack) - 3 keys = self.array_factory(orig_keys) ms = self.merge_init(keys) - # Push sentinel on stack, to check it was't touched + # Push sentinel on stack, to check it wasn't touched ms = self.timsort.merge_append(ms, stack_sentinel) i = ms.n ms = self.timsort.merge_append(ms, MergeRun(ssa, na)) diff --git a/numba/tests/test_ssa.py b/numba/tests/test_ssa.py index 2566e44b194..c25094ec1b1 100644 --- a/numba/tests/test_ssa.py +++ b/numba/tests/test_ssa.py @@ -503,7 +503,7 @@ class TestSROAIssues(MemoryLeakMixin, TestCase): # This tests issues related to the SROA optimization done in lowering, which # reduces time spent in the LLVM SROA pass. The optimization is related to # SSA and tries to reduce the number of alloca statements for variables with - # only a single assignemnt. + # only a single assignment. def test_issue7258_multiple_assignment_post_SSA(self): # This test adds a pass that will duplicate assignment statements to # variables named "foobar". 
diff --git a/numba/tests/test_svml.py b/numba/tests/test_svml.py index c05b38739ae..1822b2e3f0a 100644 --- a/numba/tests/test_svml.py +++ b/numba/tests/test_svml.py @@ -19,7 +19,7 @@ needs_svml = unittest.skipUnless(config.USING_SVML, "SVML tests need SVML to be present") -# a map of float64 vector lenghs with corresponding CPU architecture +# a map of float64 vector lengths with corresponding CPU architecture vlen2cpu = {2: 'nehalem', 4: 'haswell', 8: 'skylake-avx512'} # force LLVM to use AVX512 registers for vectorization # https://reviews.llvm.org/D67259 @@ -207,7 +207,7 @@ def run_template(): fn, contains, avoids = combo_svml_usecase(dtype, mode, vlen, flags['fastmath'], flags['name']) - # look for specific patters in the asm for a given target + # look for specific patterns in the asm for a given target with override_env_config('NUMBA_CPU_NAME', vlen2cpu[vlen]), \ override_env_config('NUMBA_CPU_FEATURES', vlen2cpu_features[vlen]): # recompile for overridden CPU @@ -355,7 +355,7 @@ def check(self, pyfunc, *args, **kwargs): np.testing.assert_almost_equal(jitstd_result, py_expected, **kwargs) np.testing.assert_almost_equal(jitfast_result, py_expected, **kwargs) - # look for specific patters in the asm for a given target + # look for specific patterns in the asm for a given target with override_env_config('NUMBA_CPU_NAME', cpu_name), \ override_env_config('NUMBA_CPU_FEATURES', cpu_features): # recompile for overridden CPU diff --git a/numba/tests/test_unicode.py b/numba/tests/test_unicode.py index 6eb186b040e..5eedc9a3841 100644 --- a/numba/tests/test_unicode.py +++ b/numba/tests/test_unicode.py @@ -2368,7 +2368,7 @@ def pyfunc(s, x, y, count): with self.assertRaises(TypingError) as raises: cfunc('ababababab', 'ba', 'qqq', 3.5) - msg = 'Unsupported parameters. The parametrs must be Integer.' + msg = 'Unsupported parameters. The parameters must be Integer.' 
self.assertIn(msg, str(raises.exception)) with self.assertRaises(TypingError) as raises: diff --git a/numba/typed/typedlist.py b/numba/typed/typedlist.py index d9c35c71cba..9626afc6e15 100644 --- a/numba/typed/typedlist.py +++ b/numba/typed/typedlist.py @@ -234,7 +234,7 @@ def __init__(self, *args, **kwargs): Parameters ---------- args: iterable - The iterable to intialize the list from + The iterable to initialize the list from lsttype : numba.core.types.ListType; keyword-only Used internally for the list type. meminfo : MemInfo; keyword-only @@ -666,15 +666,15 @@ def impl_numba_typeref_ctor(cls, *args): # special case 0d Numpy arrays if isinstance(args[0], types.Array) and args[0].ndim == 0: def impl(cls, *args): - # Instatiate an empty list and populate it with the single + # Instantiate an empty list and populate it with the single # value from the array. r = List.empty_list(item_type) r.append(args[0].item()) return r else: def impl(cls, *args): - # Instatiate an empty list and populate it with values from the - # iterable. + # Instantiate an empty list and populate it with values from + # the iterable. r = List.empty_list(item_type) for i in args[0]: r.append(i)