mirror of https://github.com/pypa/pip

Upgrade pygments to 2.14.0

parent d2852d0ad2
commit c0ba81850b

@@ -0,0 +1 @@
+Upgrade pygments to 2.14.0

@@ -26,7 +26,7 @@
 """
 from io import StringIO, BytesIO
 
-__version__ = '2.13.0'
+__version__ = '2.14.0'
 __docformat__ = 'restructuredtext'
 
 __all__ = ['lex', 'format', 'highlight']

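The hunk above is the version bump in the vendored pygments/__init__.py. A minimal sketch of checking the bundled version at runtime, assuming pip's vendored package layout (the same pip._vendor paths used throughout this diff):

# Sketch: read the version string changed in the hunk above.
from pip._vendor import pygments

print(pygments.__version__)  # expected to read '2.14.0' after this upgrade
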
@@ -8,7 +8,6 @@
     :license: BSD, see LICENSE for details.
 """
 
-import re
 import sys
 import types
 from fnmatch import fnmatch

@@ -878,10 +878,12 @@ class HtmlFormatter(Formatter):
             # for all but the last line
             for part in parts[:-1]:
                 if line:
-                    if lspan != cspan:
+                    # Also check for part being non-empty, so we avoid creating
+                    # empty <span> tags
+                    if lspan != cspan and part:
                         line.extend(((lspan and '</span>'), cspan, part,
                                      (cspan and '</span>'), lsep))
-                    else:  # both are the same
+                    else:  # both are the same, or the current part was empty
                         line.extend((part, (lspan and '</span>'), lsep))
                     yield 1, ''.join(line)
                     line = []

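This hunk (apparently pygments/formatters/html.py) stops wrapping empty split parts in their own span. A minimal usage sketch of the behaviour it targets; the assertion is illustrative rather than a regression test from the commit:

import re

from pip._vendor.pygments import highlight
from pip._vendor.pygments.lexers.python import PythonLexer
from pip._vendor.pygments.formatters.html import HtmlFormatter

html = highlight("x = 1\n", PythonLexer(), HtmlFormatter())
# With the fix, empty parts take the else-branch, so no empty
# <span ...></span> pair should appear in the output.
print(re.search(r'<span[^>]*></span>', html))  # expected: None
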
@@ -128,38 +128,12 @@ class IRCFormatter(Formatter):
         self._lineno = 0
 
     def _write_lineno(self, outfile):
-        self._lineno += 1
-        outfile.write("\n%04d: " % self._lineno)
-
-    def _format_unencoded_with_lineno(self, tokensource, outfile):
-        self._write_lineno(outfile)
-
-        for ttype, value in tokensource:
-            if value.endswith("\n"):
-                self._write_lineno(outfile)
-                value = value[:-1]
-            color = self.colorscheme.get(ttype)
-            while color is None:
-                ttype = ttype.parent
-                color = self.colorscheme.get(ttype)
-            if color:
-                color = color[self.darkbg]
-                spl = value.split('\n')
-                for line in spl[:-1]:
-                    self._write_lineno(outfile)
-                    if line:
-                        outfile.write(ircformat(color, line[:-1]))
-                if spl[-1]:
-                    outfile.write(ircformat(color, spl[-1]))
-            else:
-                outfile.write(value)
-
-        outfile.write("\n")
+        if self.linenos:
+            self._lineno += 1
+            outfile.write("%04d: " % self._lineno)
 
     def format_unencoded(self, tokensource, outfile):
-        if self.linenos:
-            self._format_unencoded_with_lineno(tokensource, outfile)
-            return
+        self._write_lineno(outfile)
 
         for ttype, value in tokensource:
            color = self.colorscheme.get(ttype)

@@ -173,6 +147,7 @@ class IRCFormatter(Formatter):
                 if line:
                     outfile.write(ircformat(color, line))
                 outfile.write('\n')
+                self._write_lineno(outfile)
             if spl[-1]:
                 outfile.write(ircformat(color, spl[-1]))
             else:

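The two irc.py hunks fold the separate line-numbering code path into _write_lineno(), which now only writes a number when linenos is set and is called from format_unencoded(). A minimal sketch of exercising that option against the vendored modules:

from pip._vendor.pygments import highlight
from pip._vendor.pygments.lexers.python import PythonLexer
from pip._vendor.pygments.formatters.irc import IRCFormatter

# Each line of output should now carry a zero-padded "0001: " style prefix.
print(highlight("a = 1\nb = 2\n", PythonLexer(), IRCFormatter(linenos=True)))
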
@@ -14,15 +14,16 @@ import time
 
 from pip._vendor.pygments.filter import apply_filters, Filter
 from pip._vendor.pygments.filters import get_filter_by_name
-from pip._vendor.pygments.token import Error, Text, Other, _TokenType
+from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
 from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
     make_analysator, Future, guess_decode
 from pip._vendor.pygments.regexopt import regex_opt
 
 __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
            'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
-           'default', 'words']
+           'default', 'words', 'line_re']
 
+line_re = re.compile('.*?\n')
+
 _encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                  (b'\xff\xfe\0\0', 'utf-32'),

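line_re is now part of the module's exported names (note the __all__ change above), so individual lexer modules can import it instead of compiling their own copy; the lexers/python.py hunk further down does exactly that. A one-line sketch:

from pip._vendor.pygments.lexer import line_re

print(line_re.pattern)  # the shared '.*?\n' line-splitting pattern
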
@@ -670,7 +671,7 @@ class RegexLexer(Lexer, metaclass=RegexLexerMeta):
                         # at EOL, reset state to "root"
                         statestack = ['root']
                         statetokens = tokendefs['root']
-                        yield pos, Text, '\n'
+                        yield pos, Whitespace, '\n'
                         pos += 1
                         continue
                     yield pos, Error, text[pos]

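This branch only runs when no rule of the current state matches and the next character is a newline; it now emits Whitespace instead of Text. A small sketch with a deliberately empty (hypothetical) lexer that forces the fallback:

from pip._vendor.pygments.lexer import RegexLexer

class EmptyLexer(RegexLexer):
    # Hypothetical lexer used only to reach the no-rule-matched branch above.
    tokens = {'root': []}

print(list(EmptyLexer().get_tokens_unprocessed("\n")))
# expected: [(0, Token.Text.Whitespace, '\n')]
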
@@ -8,7 +8,6 @@
     :license: BSD, see LICENSE for details.
 """
 
-import re
 import sys
 import types
 from fnmatch import fnmatch

@@ -30,6 +30,7 @@ LEXERS = {
     'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
     'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
     'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
+    'ArturoLexer': ('pip._vendor.pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()),
     'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')),
     'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
     'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),

@@ -152,13 +153,14 @@ LEXERS = {
     'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
     'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
     'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
-    'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
+    'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)),
     'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
     'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
     'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
     'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
     'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
     'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
+    'FiftLexer': ('pip._vendor.pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()),
     'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
     'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
     'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),

@@ -167,7 +169,9 @@ LEXERS = {
     'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
     'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
     'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
+    'FuncLexer': ('pip._vendor.pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()),
     'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
+    'GAPConsoleLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()),
     'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
     'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
     'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),

@@ -196,7 +200,7 @@ LEXERS = {
     'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
     'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
     'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
-    'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
+    'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()),
     'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
     'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
     'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),

@@ -236,6 +240,7 @@ LEXERS = {
     'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
     'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
     'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')),
+    'JsonnetLexer': ('pip._vendor.pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()),
     'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
     'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
     'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),

@@ -270,8 +275,10 @@ LEXERS = {
     'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
     'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
     'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
-    'MCFunctionLexer': ('pip._vendor.pygments.lexers.mcfunction', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
+    'MCFunctionLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
+    'MCSchemaLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)),
     'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
+    'MIPSLexer': ('pip._vendor.pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()),
     'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
     'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
     'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),

@@ -316,7 +323,7 @@ LEXERS = {
     'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
     'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
     'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
-    'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
+    'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)),
     'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
     'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
     'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),

@@ -350,6 +357,7 @@ LEXERS = {
     'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
     'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
     'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
+    'PhixLexer': ('pip._vendor.pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)),
     'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
     'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
     'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),

@@ -357,6 +365,7 @@ LEXERS = {
     'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
     'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
     'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
+    'PortugolLexer': ('pip._vendor.pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()),
     'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
     'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
     'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),

@@ -376,7 +385,7 @@ LEXERS = {
     'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
     'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
     'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
-    'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
+    'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
     'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
     'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
     'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),

@@ -421,7 +430,7 @@ LEXERS = {
     'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
     'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
     'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
-    'SNBTLexer': ('pip._vendor.pygments.lexers.mcfunction', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
+    'SNBTLexer': ('pip._vendor.pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
     'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
     'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
     'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),

@@ -485,6 +494,7 @@ LEXERS = {
     'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
     'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
     'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
+    'TlbLexer': ('pip._vendor.pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()),
     'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
     'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
     'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),

@@ -519,6 +529,8 @@ LEXERS = {
     'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
     'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
     'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
+    'WoWTocLexer': ('pip._vendor.pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()),
+    'WrenLexer': ('pip._vendor.pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()),
     'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
     'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
     'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),

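The _mapping.py hunks register the lexers new in this release (Arturo, Fift, FunC, GAP console, Jsonnet, MCSchema, MIPS, Phix, Portugol, Tl-b, WoW TOC, Wren) and extend existing entries with patterns such as *.fsx, *.pyi and *.nasm. A lookup sketch against the vendored registry:

from pip._vendor.pygments.lexers import get_lexer_by_name, get_lexer_for_filename

print(get_lexer_by_name("jsonnet"))        # JsonnetLexer, newly mapped above
print(get_lexer_for_filename("pkg.pyi"))   # PythonLexer now claims *.pyi stubs
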
@@ -12,18 +12,16 @@ import re
 import keyword
 
 from pip._vendor.pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
-    default, words, combined, do_insertions, this
+    default, words, combined, do_insertions, this, line_re
 from pip._vendor.pygments.util import get_bool_opt, shebang_matches
 from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
-    Number, Punctuation, Generic, Other, Error
+    Number, Punctuation, Generic, Other, Error, Whitespace
 from pip._vendor.pygments import unistring as uni
 
 __all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
            'Python2Lexer', 'Python2TracebackLexer',
            'CythonLexer', 'DgLexer', 'NumPyLexer']
 
-line_re = re.compile('.*?\n')
-
 
 class PythonLexer(RegexLexer):
     """

@@ -42,6 +40,8 @@ class PythonLexer(RegexLexer):
     filenames = [
         '*.py',
         '*.pyw',
+        # Type stubs
+        '*.pyi',
         # Jython
         '*.jy',
         # Sage

@@ -100,11 +100,11 @@
 
     tokens = {
         'root': [
-            (r'\n', Text),
+            (r'\n', Whitespace),
             (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
-             bygroups(Text, String.Affix, String.Doc)),
+             bygroups(Whitespace, String.Affix, String.Doc)),
             (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
-             bygroups(Text, String.Affix, String.Doc)),
+             bygroups(Whitespace, String.Affix, String.Doc)),
             (r'\A#!.+$', Comment.Hashbang),
             (r'#.*$', Comment.Single),
             (r'\\\n', Text),

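With the root-state change above, a newline in Python source is now tokenised as Whitespace rather than plain Text. A short sketch against the vendored lexer:

from pip._vendor.pygments.lexers.python import PythonLexer

print(list(PythonLexer().get_tokens("x = 1\n"))[-1])
# expected: (Token.Text.Whitespace, '\n')
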
@@ -169,7 +169,7 @@
             combined('bytesescape', 'dqs')),
            ("([bB])(')", bygroups(String.Affix, String.Single),
             combined('bytesescape', 'sqs')),
-
+
            (r'[^\S\n]+', Text),
            include('numbers'),
            (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),

@@ -192,13 +192,13 @@
            (r'(=\s*)?'  # debug (https://bugs.python.org/issue36817)
             r'(\![sraf])?'  # conversion
             r':', String.Interpol, '#pop'),
-            (r'\s+', Text),  # allow new lines
+            (r'\s+', Whitespace),  # allow new lines
            include('expr'),
        ],
        'expr-inside-fstring-inner': [
            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
            (r'[])}]', Punctuation, '#pop'),
-            (r'\s+', Text),  # allow new lines
+            (r'\s+', Whitespace),  # allow new lines
            include('expr'),
        ],
        'expr-keywords': [

@@ -229,7 +229,7 @@
        ],
        'soft-keywords-inner': [
            # optional `_` keyword
-            (r'(\s+)([^\n_]*)(_\b)', bygroups(Text, using(this), Keyword)),
+            (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)),
            default('#pop')
        ],
        'builtins': [

@@ -445,11 +445,11 @@ class Python2Lexer(RegexLexer):
 
     tokens = {
         'root': [
-            (r'\n', Text),
+            (r'\n', Whitespace),
             (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
-             bygroups(Text, String.Affix, String.Doc)),
+             bygroups(Whitespace, String.Affix, String.Doc)),
             (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
-             bygroups(Text, String.Affix, String.Doc)),
+             bygroups(Whitespace, String.Affix, String.Doc)),
             (r'[^\S\n]+', Text),
             (r'\A#!.+$', Comment.Hashbang),
             (r'#.*$', Comment.Single),

@@ -742,7 +742,7 @@ class PythonTracebackLexer(RegexLexer):
 
     tokens = {
         'root': [
-            (r'\n', Text),
+            (r'\n', Whitespace),
             (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
             (r'^During handling of the above exception, another '
              r'exception occurred:\n\n', Generic.Traceback),

@@ -753,24 +753,24 @@ class PythonTracebackLexer(RegexLexer):
         ],
         'intb': [
             (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
-             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
+             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
             (r'^( File )("[^"]+")(, line )(\d+)(\n)',
-             bygroups(Text, Name.Builtin, Text, Number, Text)),
+             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
             (r'^( )(.+)(\n)',
-             bygroups(Text, using(PythonLexer), Text), 'markers'),
+             bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
             (r'^([ \t]*)(\.\.\.)(\n)',
-             bygroups(Text, Comment, Text)),  # for doctests...
+             bygroups(Whitespace, Comment, Whitespace)),  # for doctests...
             (r'^([^:]+)(: )(.+)(\n)',
-             bygroups(Generic.Error, Text, Name, Text), '#pop'),
+             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
             (r'^([a-zA-Z_][\w.]*)(:?\n)',
-             bygroups(Generic.Error, Text), '#pop')
+             bygroups(Generic.Error, Whitespace), '#pop')
         ],
         'markers': [
             # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>`
             # error locations in Python 3.11+, or single-caret markers
             # for syntax errors before that.
             (r'^( {4,})([~^]+)(\n)',
-             bygroups(Text, Punctuation.Marker, Text),
+             bygroups(Whitespace, Punctuation.Marker, Whitespace),
             '#pop'),
            default('#pop'),
        ],

@@ -808,17 +808,17 @@ class Python2TracebackLexer(RegexLexer):
         ],
         'intb': [
             (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
-             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
+             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
             (r'^( File )("[^"]+")(, line )(\d+)(\n)',
-             bygroups(Text, Name.Builtin, Text, Number, Text)),
+             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
             (r'^( )(.+)(\n)',
-             bygroups(Text, using(Python2Lexer), Text), 'marker'),
+             bygroups(Text, using(Python2Lexer), Whitespace), 'marker'),
             (r'^([ \t]*)(\.\.\.)(\n)',
-             bygroups(Text, Comment, Text)),  # for doctests...
+             bygroups(Text, Comment, Whitespace)),  # for doctests...
             (r'^([^:]+)(: )(.+)(\n)',
-             bygroups(Generic.Error, Text, Name, Text), '#pop'),
+             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
             (r'^([a-zA-Z_]\w*)(:?\n)',
-             bygroups(Generic.Error, Text), '#pop')
+             bygroups(Generic.Error, Whitespace), '#pop')
         ],
         'marker': [
             # For syntax errors.

@@ -843,13 +843,13 @@ class CythonLexer(RegexLexer):
 
     tokens = {
         'root': [
-            (r'\n', Text),
-            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
-            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
+            (r'\n', Whitespace),
+            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)),
+            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)),
             (r'[^\S\n]+', Text),
             (r'#.*$', Comment),
             (r'[]{}:(),;[]', Punctuation),
-            (r'\\\n', Text),
+            (r'\\\n', Whitespace),
             (r'\\', Text),
             (r'(in|is|and|or|not)\b', Operator.Word),
             (r'(<)([a-zA-Z0-9.?]+)(>)',

@@ -74,6 +74,8 @@ class PygmentsDoc(Directive):
             out = self.document_formatters()
         elif self.arguments[0] == 'filters':
             out = self.document_filters()
+        elif self.arguments[0] == 'lexers_overview':
+            out = self.document_lexers_overview()
         else:
             raise Exception('invalid argument for "pygmentsdoc" directive')
         node = nodes.compound()

@@ -83,6 +85,66 @@ class PygmentsDoc(Directive):
         self.state.document.settings.record_dependencies.add(fn)
         return node.children
 
+    def document_lexers_overview(self):
+        """Generate a tabular overview of all lexers.
+
+        The columns are the lexer name, the extensions handled by this lexer
+        (or "None"), the aliases and a link to the lexer class."""
+        from pip._vendor.pygments.lexers._mapping import LEXERS
+        from pip._vendor.pygments.lexers import find_lexer_class
+        out = []
+
+        table = []
+
+        def format_link(name, url):
+            if url:
+                return f'`{name} <{url}>`_'
+            return name
+
+        for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
+            lexer_cls = find_lexer_class(data[1])
+            extensions = lexer_cls.filenames + lexer_cls.alias_filenames
+
+            table.append({
+                'name': format_link(data[1], lexer_cls.url),
+                'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\') or 'None',
+                'aliases': ', '.join(data[2]),
+                'class': f'{data[0]}.{classname}'
+            })
+
+        column_names = ['name', 'extensions', 'aliases', 'class']
+        column_lengths = [max([len(row[column]) for row in table if row[column]])
+                          for column in column_names]
+
+        def write_row(*columns):
+            """Format a table row"""
+            out = []
+            for l, c in zip(column_lengths, columns):
+                if c:
+                    out.append(c.ljust(l))
+                else:
+                    out.append(' '*l)
+
+            return ' '.join(out)
+
+        def write_seperator():
+            """Write a table separator row"""
+            sep = ['='*c for c in column_lengths]
+            return write_row(*sep)
+
+        out.append(write_seperator())
+        out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
+        out.append(write_seperator())
+        for row in table:
+            out.append(write_row(
+                row['name'],
+                row['extensions'],
+                row['aliases'],
+                f':class:`~{row["class"]}`'))
+        out.append(write_seperator())
+
+        return '\n'.join(out)
+
     def document_lexers(self):
         from pip._vendor.pygments.lexers._mapping import LEXERS
         out = []

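The new document_lexers_overview() renders a reStructuredText simple table by padding each cell and framing header and body with '=' rows. A standalone sketch of that layout logic, re-implemented for illustration rather than taken from the vendored code:

rows = [("Name", "Short name(s)"), ("Python", "python, py"), ("Jsonnet", "jsonnet")]
widths = [max(len(row[i]) for row in rows) for i in range(2)]
sep = " ".join("=" * w for w in widths)
print(sep)
print(" ".join(cell.ljust(w) for cell, w in zip(rows[0], widths)))
print(sep)
for row in rows[1:]:
    print(" ".join(cell.ljust(w) for cell, w in zip(row, widths)))
print(sep)
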
@@ -13,7 +13,7 @@ requests==2.28.2
 idna==3.4
 urllib3==1.26.15
 rich==13.3.3
-pygments==2.13.0
+pygments==2.14.0
 typing_extensions==4.5.0
 resolvelib==1.0.1
 setuptools==67.6.1

@@ -35,3 +35,25 @@ index c6e2517df..76255b525 100644
 +        sys.exit(main(sys.argv))
      except KeyboardInterrupt:
          sys.exit(1)
+diff --git a/src/pip/_vendor/pygments/sphinxext.py b/src/pip/_vendor/pygments/sphinxext.py
+index 3ea2e36e1..23c19504c 100644
+--- a/src/pip/_vendor/pygments/sphinxext.py
++++ b/src/pip/_vendor/pygments/sphinxext.py
+@@ -91,7 +91,7 @@ class PygmentsDoc(Directive):
+         The columns are the lexer name, the extensions handled by this lexer
+         (or "None"), the aliases and a link to the lexer class."""
+         from pygments.lexers._mapping import LEXERS
+-        import pygments.lexers
++        from pygments.lexers import find_lexer_class
+         out = []
+ 
+         table = []
+@@ -102,7 +102,7 @@ class PygmentsDoc(Directive):
+             return name
+ 
+         for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
+-            lexer_cls = pygments.lexers.find_lexer_class(data[1])
++            lexer_cls = find_lexer_class(data[1])
+             extensions = lexer_cls.filenames + lexer_cls.alias_filenames
+ 
+             table.append({
