Linux vps-61133.fhnet.fr 4.9.0-19-amd64 #1 SMP Debian 4.9.320-2 (2022-06-30) x86_64
Apache/2.4.25 (Debian)
Server IP : 93.113.207.21 & Your IP : 216.73.216.35
Domains :
Can't Read [ /etc/named.conf ]
User : www-data
Terminal
Auto Root
Create File
Create Folder
Localroot Suggester
Backdoor Destroyer
Readme
/
usr /
src /
Python-3.10.14 /
Parser /
Delete
Unzip
Name
Size
Permission
Date
Action
Python.asdl
6.02
KB
-rw-r--r--
2024-03-19 22:46
asdl.py
12.76
KB
-rw-r--r--
2024-03-19 22:46
asdl_c.py
51.54
KB
-rwxr-xr-x
2024-03-19 22:46
myreadline.c
11.6
KB
-rw-r--r--
2024-03-19 22:46
myreadline.gcda
4.29
KB
-rw-r--r--
2025-06-04 09:21
myreadline.o
53.38
KB
-rw-r--r--
2025-06-04 09:22
parser.c
1.14
MB
-rw-r--r--
2024-03-19 22:46
parser.gcda
94.42
KB
-rw-r--r--
2025-06-04 09:21
parser.o
3
MB
-rw-r--r--
2025-06-04 09:22
peg_api.c
874
B
-rw-r--r--
2024-03-19 22:46
peg_api.gcda
3.57
KB
-rw-r--r--
2025-06-04 09:21
peg_api.o
59.19
KB
-rw-r--r--
2025-06-04 09:21
pegen.c
75.33
KB
-rw-r--r--
2024-03-19 22:46
pegen.gcda
18.82
KB
-rw-r--r--
2025-06-04 09:21
pegen.h
11.86
KB
-rw-r--r--
2024-03-19 22:46
pegen.o
437.34
KB
-rw-r--r--
2025-06-04 09:22
string_parser.c
39.2
KB
-rw-r--r--
2024-03-19 22:46
string_parser.gcda
7.37
KB
-rw-r--r--
2025-06-04 09:21
string_parser.h
1.77
KB
-rw-r--r--
2024-03-19 22:46
string_parser.o
259.25
KB
-rw-r--r--
2025-06-04 09:22
token.c
4.51
KB
-rw-r--r--
2024-03-19 22:46
token.gcda
3.97
KB
-rw-r--r--
2025-06-04 09:21
token.o
42.32
KB
-rw-r--r--
2025-06-04 09:21
tokenizer.c
63.17
KB
-rw-r--r--
2024-03-19 22:46
tokenizer.gcda
13.27
KB
-rw-r--r--
2025-06-04 09:21
tokenizer.h
4.45
KB
-rw-r--r--
2024-03-19 22:46
tokenizer.o
330.31
KB
-rw-r--r--
2025-06-04 09:22
Save
Rename
#ifndef Py_TOKENIZER_H #define Py_TOKENIZER_H #ifdef __cplusplus extern "C" { #endif #include "object.h" /* Tokenizer interface */ #include "token.h" /* For token types */ #define MAXINDENT 100 /* Max indentation level */ #define MAXLEVEL 200 /* Max parentheses level */ enum decoding_state { STATE_INIT, STATE_SEEK_CODING, STATE_NORMAL }; enum interactive_underflow_t { /* Normal mode of operation: return a new token when asked in interactie mode */ IUNDERFLOW_NORMAL, /* Forcefully return ENDMARKER when asked for a new token in interactive mode. This * can be used to prevent the tokenizer to prompt the user for new tokens */ IUNDERFLOW_STOP, }; /* Tokenizer state */ struct tok_state { /* Input state; buf <= cur <= inp <= end */ /* NB an entire line is held in the buffer */ char *buf; /* Input buffer, or NULL; malloc'ed if fp != NULL */ char *cur; /* Next character in buffer */ char *inp; /* End of data in buffer */ int fp_interactive; /* If the file descriptor is interactive */ char *interactive_src_start; /* The start of the source parsed so far in interactive mode */ char *interactive_src_end; /* The end of the source parsed so far in interactive mode */ const char *end; /* End of input buffer if buf != NULL */ const char *start; /* Start of current token if not NULL */ int done; /* E_OK normally, E_EOF at EOF, otherwise error code */ /* NB If done != E_OK, cur must be == inp!!! */ FILE *fp; /* Rest of input; NULL if tokenizing a string */ int tabsize; /* Tab spacing */ int indent; /* Current indentation index */ int indstack[MAXINDENT]; /* Stack of indents */ int atbol; /* Nonzero if at begin of new line */ int pendin; /* Pending indents (if > 0) or dedents (if < 0) */ const char *prompt, *nextprompt; /* For interactive prompting */ int lineno; /* Current line number */ int first_lineno; /* First line of a single line or multi line string expression (cf. 
issue 16806) */ int level; /* () [] {} Parentheses nesting level */ /* Used to allow free continuations inside them */ char parenstack[MAXLEVEL]; int parenlinenostack[MAXLEVEL]; int parencolstack[MAXLEVEL]; PyObject *filename; /* Stuff for checking on different tab sizes */ int altindstack[MAXINDENT]; /* Stack of alternate indents */ /* Stuff for PEP 0263 */ enum decoding_state decoding_state; int decoding_erred; /* whether erred in decoding */ char *encoding; /* Source encoding. */ int cont_line; /* whether we are in a continuation line. */ const char* line_start; /* pointer to start of current line */ const char* multi_line_start; /* pointer to start of first line of a single line or multi line string expression (cf. issue 16806) */ PyObject *decoding_readline; /* open(...).readline */ PyObject *decoding_buffer; const char* enc; /* Encoding for the current str. */ char* str; /* Source string being tokenized (if tokenizing from a string)*/ char* input; /* Tokenizer's newline translated copy of the string. */ int type_comments; /* Whether to look for type comments */ /* async/await related fields (still needed depending on feature_version) */ int async_hacks; /* =1 if async/await aren't always keywords */ int async_def; /* =1 if tokens are inside an 'async def' body. */ int async_def_indent; /* Indentation level of the outermost 'async def'. */ int async_def_nl; /* =1 if the outermost 'async def' had at least one NEWLINE token after it. 
*/ /* How to proceed when asked for a new token in interactive mode */ enum interactive_underflow_t interactive_underflow; }; extern struct tok_state *PyTokenizer_FromString(const char *, int); extern struct tok_state *PyTokenizer_FromUTF8(const char *, int); extern struct tok_state *PyTokenizer_FromFile(FILE *, const char*, const char *, const char *); extern void PyTokenizer_Free(struct tok_state *); extern int PyTokenizer_Get(struct tok_state *, const char **, const char **); #define tok_dump _Py_tok_dump #ifdef __cplusplus } #endif #endif /* !Py_TOKENIZER_H */