Linux vps-61133.fhnet.fr 4.9.0-19-amd64 #1 SMP Debian 4.9.320-2 (2022-06-30) x86_64
Apache/2.4.25 (Debian)
Server IP: 93.113.207.21 & Your IP: 216.73.216.112
Domains: can't read /etc/named.conf
User: www-data
Directory listing of /usr/src/Python-3.10.14/Tools/scripts/:
Name                             Size     Permission  Date
2to3                             96 B     -rwxr-xr-x  2024-03-19 22:46
README                           4.46 KB  -rw-r--r--  2024-03-19 22:46
abitype.py                       5.44 KB  -rwxr-xr-x  2024-03-19 22:46
analyze_dxp.py                   4.08 KB  -rw-r--r--  2024-03-19 22:46
byext.py                         3.81 KB  -rwxr-xr-x  2024-03-19 22:46
byteyears.py                     1.61 KB  -rwxr-xr-x  2024-03-19 22:46
checkpip.py                      793 B    -rwxr-xr-x  2024-03-19 22:46
cleanfuture.py                   8.42 KB  -rwxr-xr-x  2024-03-19 22:46
combinerefs.py                   4.35 KB  -rwxr-xr-x  2024-03-19 22:46
copytime.py                      663 B    -rwxr-xr-x  2024-03-19 22:46
crlf.py                          632 B    -rwxr-xr-x  2024-03-19 22:46
db2pickle.py                     3.54 KB  -rwxr-xr-x  2024-03-19 22:46
diff.py                          2.2 KB   -rwxr-xr-x  2024-03-19 22:46
dutree.doc                       2.18 KB  -rw-r--r--  2024-03-19 22:46
dutree.py                        1.6 KB   -rwxr-xr-x  2024-03-19 22:46
eptags.py                        1.54 KB  -rwxr-xr-x  2024-03-19 22:46
find-uname.py                    1.18 KB  -rwxr-xr-x  2024-03-19 22:46
find_recursionlimit.py           3.9 KB   -rwxr-xr-x  2024-03-19 22:46
finddiv.py                       2.48 KB  -rwxr-xr-x  2024-03-19 22:46
findlinksto.py                   1.05 KB  -rwxr-xr-x  2024-03-19 22:46
findnocoding.py                  2.88 KB  -rwxr-xr-x  2024-03-19 22:46
fixcid.py                        9.93 KB  -rwxr-xr-x  2024-03-19 22:46
fixdiv.py                        13.91 KB -rwxr-xr-x  2024-03-19 22:46
fixheader.py                     1.33 KB  -rwxr-xr-x  2024-03-19 22:46
fixnotice.py                     2.96 KB  -rwxr-xr-x  2024-03-19 22:46
fixps.py                         892 B    -rwxr-xr-x  2024-03-19 22:46
generate_opcode_h.py             2.17 KB  -rw-r--r--  2024-03-19 22:46
generate_stdlib_module_names.py  4.55 KB  -rw-r--r--  2024-03-19 22:46
generate_token.py                6.81 KB  -rwxr-xr-x  2024-03-19 22:46
get-remote-certificate.py        2.25 KB  -rwxr-xr-x  2024-03-19 22:46
google.py                        501 B    -rwxr-xr-x  2024-03-19 22:46
gprof2html.py                    2.25 KB  -rwxr-xr-x  2024-03-19 22:46
highlight.py                     8.95 KB  -rwxr-xr-x  2024-03-19 22:46
idle3                            96 B     -rwxr-xr-x  2024-03-19 22:46
ifdef.py                         3.62 KB  -rwxr-xr-x  2024-03-19 22:46
import_diagnostics.py            999 B    -rwxr-xr-x  2024-03-19 22:46
lfcr.py                          640 B    -rwxr-xr-x  2024-03-19 22:46
linktree.py                      2.38 KB  -rwxr-xr-x  2024-03-19 22:46
lll.py                           748 B    -rwxr-xr-x  2024-03-19 22:46
mailerdaemon.py                  7.85 KB  -rwxr-xr-x  2024-03-19 22:46
make_ctype.py                    2.23 KB  -rwxr-xr-x  2024-03-19 22:46
md5sum.py                        2.46 KB  -rwxr-xr-x  2024-03-19 22:46
mkreal.py                        1.59 KB  -rwxr-xr-x  2024-03-19 22:46
ndiff.py                         3.73 KB  -rwxr-xr-x  2024-03-19 22:46
nm2def.py                        2.42 KB  -rwxr-xr-x  2024-03-19 22:46
objgraph.py                      5.8 KB   -rwxr-xr-x  2024-03-19 22:46
parse_html5_entities.py          3.91 KB  -rwxr-xr-x  2024-03-19 22:46
parseentities.py                 1.7 KB   -rwxr-xr-x  2024-03-19 22:46
patchcheck.py                    10.51 KB -rwxr-xr-x  2024-03-19 22:46
pathfix.py                       6.63 KB  -rwxr-xr-x  2024-03-19 22:46
pdeps.py                         3.91 KB  -rwxr-xr-x  2024-03-19 22:46
pep384_macrocheck.py             4.61 KB  -rw-r--r--  2024-03-19 22:46
pickle2db.py                     3.93 KB  -rwxr-xr-x  2024-03-19 22:46
pindent.py                       16.73 KB -rwxr-xr-x  2024-03-19 22:46
ptags.py                         1.28 KB  -rwxr-xr-x  2024-03-19 22:46
pydoc3                           80 B     -rwxr-xr-x  2024-03-19 22:46
pysource.py                      3.77 KB  -rwxr-xr-x  2024-03-19 22:46
reindent-rst.py                  279 B    -rwxr-xr-x  2024-03-19 22:46
reindent.py                      11.37 KB -rwxr-xr-x  2024-03-19 22:46
rgrep.py                         1.54 KB  -rwxr-xr-x  2024-03-19 22:46
run_tests.py                     1.78 KB  -rw-r--r--  2024-03-19 22:46
serve.py                         1.2 KB   -rwxr-xr-x  2024-03-19 22:46
smelly.py                        4.9 KB   -rwxr-xr-x  2024-03-19 22:46
stable_abi.py                    21.09 KB -rwxr-xr-x  2024-03-19 22:46
suff.py                          510 B    -rwxr-xr-x  2024-03-19 22:46
texi2html.py                     68.72 KB -rwxr-xr-x  2024-03-19 22:46
untabify.py                      1.27 KB  -rwxr-xr-x  2024-03-19 22:46
update_file.py                   762 B    -rw-r--r--  2024-03-19 22:46
var_access_benchmark.py          11.63 KB -rw-r--r--  2024-03-19 22:46
verify_ensurepip_wheels.py       3.31 KB  -rwxr-xr-x  2024-03-19 22:46
which.py                         1.65 KB  -rwxr-xr-x  2024-03-19 22:46
win_add2path.py                  1.62 KB  -rw-r--r--  2024-03-19 22:46
Contents of generate_token.py:
#! /usr/bin/env python3
# This script generates token related files from Grammar/Tokens:
#
#     Doc/library/token-list.inc
#     Include/token.h
#     Parser/token.c
#     Lib/token.py

NT_OFFSET = 256

def load_tokens(path):
    tok_names = []
    string_to_tok = {}
    ERRORTOKEN = None
    with open(path) as fp:
        for line in fp:
            line = line.strip()
            # strip comments
            i = line.find('#')
            if i >= 0:
                line = line[:i].strip()
            if not line:
                continue
            fields = line.split()
            name = fields[0]
            value = len(tok_names)
            if name == 'ERRORTOKEN':
                ERRORTOKEN = value
            string = fields[1] if len(fields) > 1 else None
            if string:
                string = eval(string)
                string_to_tok[string] = value
            tok_names.append(name)
    return tok_names, ERRORTOKEN, string_to_tok

def update_file(file, content):
    try:
        with open(file, 'r') as fobj:
            if fobj.read() == content:
                return False
    except (OSError, ValueError):
        pass
    with open(file, 'w') as fobj:
        fobj.write(content)
    return True


token_h_template = """\
/* Auto-generated by Tools/scripts/generate_token.py */

/* Token types */
#ifndef Py_LIMITED_API
#ifndef Py_TOKEN_H
#define Py_TOKEN_H
#ifdef __cplusplus
extern "C" {
#endif

#undef TILDE   /* Prevent clash of our definition with system macro. Ex AIX, ioctl.h */

%s\
#define N_TOKENS        %d
#define NT_OFFSET       %d

/* Special definitions for cooperation with parser */

#define ISTERMINAL(x)           ((x) < NT_OFFSET)
#define ISNONTERMINAL(x)        ((x) >= NT_OFFSET)
#define ISEOF(x)                ((x) == ENDMARKER)
#define ISWHITESPACE(x)         ((x) == ENDMARKER || \\
                                 (x) == NEWLINE   || \\
                                 (x) == INDENT    || \\
                                 (x) == DEDENT)


PyAPI_DATA(const char * const) _PyParser_TokenNames[]; /* Token names */
PyAPI_FUNC(int) PyToken_OneChar(int);
PyAPI_FUNC(int) PyToken_TwoChars(int, int);
PyAPI_FUNC(int) PyToken_ThreeChars(int, int, int);

#ifdef __cplusplus
}
#endif
#endif /* !Py_TOKEN_H */
#endif /* Py_LIMITED_API */
"""

def make_h(infile, outfile='Include/token.h'):
    tok_names, ERRORTOKEN, string_to_tok = load_tokens(infile)

    defines = []
    for value, name in enumerate(tok_names[:ERRORTOKEN + 1]):
        defines.append("#define %-15s %d\n" % (name, value))

    if update_file(outfile, token_h_template % (
            ''.join(defines),
            len(tok_names),
            NT_OFFSET
        )):
        print("%s regenerated from %s" % (outfile, infile))


token_c_template = """\
/* Auto-generated by Tools/scripts/generate_token.py */

#include "Python.h"
#include "token.h"

/* Token names */

const char * const _PyParser_TokenNames[] = {
%s\
};

/* Return the token corresponding to a single character */

int
PyToken_OneChar(int c1)
{
%s\
    return OP;
}

int
PyToken_TwoChars(int c1, int c2)
{
%s\
    return OP;
}

int
PyToken_ThreeChars(int c1, int c2, int c3)
{
%s\
    return OP;
}
"""

def generate_chars_to_token(mapping, n=1):
    result = []
    write = result.append
    indent = '    ' * n
    write(indent)
    write('switch (c%d) {\n' % (n,))
    for c in sorted(mapping):
        write(indent)
        value = mapping[c]
        if isinstance(value, dict):
            write("case '%s':\n" % (c,))
            write(generate_chars_to_token(value, n + 1))
            write(indent)
            write('    break;\n')
        else:
            write("case '%s': return %s;\n" % (c, value))
    write(indent)
    write('}\n')
    return ''.join(result)

def make_c(infile, outfile='Parser/token.c'):
    tok_names, ERRORTOKEN, string_to_tok = load_tokens(infile)
    string_to_tok['<>'] = string_to_tok['!=']
    chars_to_token = {}
    for string, value in string_to_tok.items():
        assert 1 <= len(string) <= 3
        name = tok_names[value]
        m = chars_to_token.setdefault(len(string), {})
        for c in string[:-1]:
            m = m.setdefault(c, {})
        m[string[-1]] = name

    names = []
    for value, name in enumerate(tok_names):
        if value >= ERRORTOKEN:
            name = '<%s>' % name
        names.append('    "%s",\n' % name)
    names.append('    "<N_TOKENS>",\n')

    if update_file(outfile, token_c_template % (
            ''.join(names),
            generate_chars_to_token(chars_to_token[1]),
            generate_chars_to_token(chars_to_token[2]),
            generate_chars_to_token(chars_to_token[3])
        )):
        print("%s regenerated from %s" % (outfile, infile))


token_inc_template = """\
.. Auto-generated by Tools/scripts/generate_token.py
%s
.. data:: N_TOKENS

.. data:: NT_OFFSET
"""

def make_rst(infile, outfile='Doc/library/token-list.inc'):
    tok_names, ERRORTOKEN, string_to_tok = load_tokens(infile)
    tok_to_string = {value: s for s, value in string_to_tok.items()}

    names = []
    for value, name in enumerate(tok_names[:ERRORTOKEN + 1]):
        names.append('.. data:: %s' % (name,))
        if value in tok_to_string:
            names.append('')
            names.append('   Token value for ``"%s"``.' % tok_to_string[value])
        names.append('')

    if update_file(outfile, token_inc_template % '\n'.join(names)):
        print("%s regenerated from %s" % (outfile, infile))


token_py_template = '''\
"""Token constants."""
# Auto-generated by Tools/scripts/generate_token.py

__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']

%s
N_TOKENS = %d
# Special definitions for cooperation with parser
NT_OFFSET = %d

tok_name = {value: name
            for name, value in globals().items()
            if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())

EXACT_TOKEN_TYPES = {
%s
}

def ISTERMINAL(x):
    return x < NT_OFFSET

def ISNONTERMINAL(x):
    return x >= NT_OFFSET

def ISEOF(x):
    return x == ENDMARKER
'''

def make_py(infile, outfile='Lib/token.py'):
    tok_names, ERRORTOKEN, string_to_tok = load_tokens(infile)

    constants = []
    for value, name in enumerate(tok_names):
        constants.append('%s = %d' % (name, value))
    constants.insert(ERRORTOKEN,
        "# These aren't used by the C tokenizer but are needed for tokenize.py")

    token_types = []
    for s, value in sorted(string_to_tok.items()):
        token_types.append('    %r: %s,' % (s, tok_names[value]))

    if update_file(outfile, token_py_template % (
            '\n'.join(constants),
            len(tok_names),
            NT_OFFSET,
            '\n'.join(token_types),
        )):
        print("%s regenerated from %s" % (outfile, infile))


def main(op, infile='Grammar/Tokens', *args):
    make = globals()['make_' + op]
    make(infile, *args)


if __name__ == '__main__':
    import sys
    main(*sys.argv[1:])