def logs(request: HttpRequest, tool: str = "all") -> HttpResponse:
    """Render a paginated, filterable view of the main crawler log.

    Staff-only. Reads MAIN_LOGGER, splits it into entries on the ANSI
    color-reset delimiter (newest first), optionally filters by the
    case-insensitive ``filter`` GET parameter, and rewrites relative
    internal URLs (archive/gallery/wanted-gallery) into absolute URLs
    so they are clickable from the log page.

    Args:
        request: current request; honors GET params ``filter`` and ``page``.
        tool: which tool's log to show; only "all"/"webcrawler" read the
            main log file.

    Returns:
        Rendered "viewer/logs.html" response, or an error page for
        non-staff users.
    """
    if not request.user.is_staff:
        return render_error(request, "You need to be an admin to see logs.")
    log_lines: typing.List[str] = []
    if tool == "all" or tool == "webcrawler":
        # Entries are delimited by the ANSI reset sequence + newline.
        # Use a context manager so the handle is closed even if read fails.
        with open(MAIN_LOGGER, 'rt', encoding='utf8') as f:
            log_lines = f.read().split('[0m\n')
        # Drop the trailing empty chunk produced by the final delimiter
        # (split always yields at least one element, so pop is safe).
        log_lines.pop()
        log_lines.reverse()  # newest entries first
    log_filter = request.GET.get('filter', '')
    if log_filter:
        log_lines = [x for x in log_lines if log_filter.lower() in x.lower()]
    current_base_uri = re.escape('{scheme}://{host}'.format(scheme=request.scheme, host=request.get_host()))
    # Build complete URLs for relative internal URLs (some).
    # BUG FIX: the viewer_main_url branch used a negative lookahead
    # (?!base), which can never fail right before a '/' (the base URI
    # starts with the scheme, not '/') and was therefore a no-op. The
    # negative lookbehind (?<!base) — as in the else branch — correctly
    # skips URLs already preceded by this site's base URI. The escaped
    # base URI is a fixed-width literal, so lookbehind is valid.
    if crawler_settings.urls.viewer_main_url:
        patterns = [
            r'(?<!' + current_base_uri + r')/' + crawler_settings.urls.viewer_main_url + r"archive/\d+/?",
            r'(?<!' + current_base_uri + r')/' + crawler_settings.urls.viewer_main_url + r"gallery/\d+/?",
            r'(?<!' + current_base_uri + r')/' + crawler_settings.urls.viewer_main_url + r"wanted-gallery/\d+/?",
        ]
    else:
        patterns = [
            r'(?<!' + current_base_uri + r')/archive/\d+/?',
            r'(?<!' + current_base_uri + r')/gallery/\d+/?',
            r'(?<!' + current_base_uri + r')/wanted-gallery/\d+/?',
        ]

    def build_request(match_obj: typing.Match) -> str:
        # Expand a matched relative URL into an absolute one for this host.
        return request.build_absolute_uri(match_obj.group(0))

    log_lines = [
        reduce(lambda v, pattern: re.sub(pattern, build_request, v), patterns, line)
        for line in log_lines
    ]
    paginator = Paginator(log_lines, 100)
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1  # non-numeric page param falls back to the first page
    try:
        log_lines = paginator.page(page)
    except (InvalidPage, EmptyPage):
        # Out-of-range page: clamp to the last available page.
        log_lines = paginator.page(paginator.num_pages)
    d = {'log_lines': log_lines}
    return render(request, "viewer/logs.html", d)
# Comment list
# Article table of contents