Compare commits

..

No commits in common. "13d3d653de0b0961f4258c8698328ef991cf10ea" and "9853ed432774ef19b5f1427b6ebea6909c43491e" have entirely different histories.

3 changed files with 30 additions and 138 deletions

View file

@ -28,18 +28,7 @@ FRENCH_MONTHS = ['janv.', 'févr.', 'mars', 'avr.', 'mai', 'juin',
class HighlightRenderer(mistune.HTMLRenderer):
"""Custom Mistune renderer that adds syntax highlighting to code blocks using Pygments."""
def block_code(self, code, info=None):
"""Render code blocks with syntax highlighting.
Args:
code: The code content to render
info: Optional language identifier for syntax highlighting
Returns:
str: HTML with syntax-highlighted code or plain pre/code tags
"""
if info:
lexer = get_lexer_by_name(info, stripall=True)
formatter = html.HtmlFormatter()
@ -71,20 +60,20 @@ def log(msg, *args):
sys.stderr.write(msg.format(*args) + "\n")
def _strip_tags_and_truncate(text, words=25):
"""Remove HTML tags and truncate text to the specified number of words."""
def truncate(text, words=25):
    """Strip markup tags from *text* and keep only the first *words* words."""
    # Replace every tag (including multi-line ones, via (?s)) with a space,
    # then normalise whitespace while truncating.
    plain = re.sub(r"(?s)<.*?>", " ", text)
    return " ".join(plain.split()[:words])
def _parse_headers(text):
"""Parse HTML comment headers and yield (key, value, end-index) tuples."""
def read_headers(text):
    """Parse headers in text and yield (key, value, end-index) tuples.

    Headers are HTML comments of the form ``<!-- key: value -->`` at the
    top of the text; scanning stops at the first non-header line.
    """
    header_or_body = re.compile(r"\s*<!--\s*(.+?)\s*:\s*(.+?)\s*-->\s*|.+")
    for found in header_or_body.finditer(text):
        key = found.group(1)
        if not key:
            # First non-header chunk: the body starts here, stop scanning.
            break
        yield key, found.group(2), found.end()
def _rfc_2822_format(date_str):
def rfc_2822_format(date_str):
"""Convert yyyy-mm-dd date string to RFC 2822 format date string."""
d = datetime.datetime.strptime(date_str, "%Y-%m-%d")
return d \
@ -107,8 +96,8 @@ def slugify(value):
return re.sub(r"[-\s]+", "-", value)
def parse_post_file(filename, params):
"""Parse post file: read, extract metadata, convert markdown, and generate summary."""
def read_content(filename, params):
"""Read content and metadata from file into a dictionary."""
# Read file content.
text = fread(filename)
@ -119,7 +108,7 @@ def parse_post_file(filename, params):
# Read headers.
end = 0
for key, val, end in _parse_headers(text):
for key, val, end in read_headers(text):
content[key] = val
# slugify post title
@ -132,20 +121,20 @@ def parse_post_file(filename, params):
if filename.endswith((".md", ".mkd", ".mkdn", ".mdown", ".markdown")):
summary_index = text.find("<!-- more")
if summary_index > 0:
summary = markdown(_strip_html_tags(text[:summary_index]))
summary = markdown(clean_html_tag(text[:summary_index]))
else:
summary = _strip_tags_and_truncate(markdown(_strip_html_tags(text)))
summary = truncate(markdown(clean_html_tag(text)))
clean_text = text.replace("<!-- more -->", "")
text = markdown(clean_text)
else:
summary = _strip_tags_and_truncate(text)
summary = truncate(text)
# Update the dictionary with content and RFC 2822 date.
content.update(
{
"content": text,
"content_rss": _make_links_absolute(params["site_url"], text),
"rfc_2822_date": _rfc_2822_format(content["date"]),
"content_rss": fix_relative_links(params["site_url"], text),
"rfc_2822_date": rfc_2822_format(content["date"]),
"summary": summary,
}
)
@ -153,16 +142,16 @@ def parse_post_file(filename, params):
return content
def _make_links_absolute(site_url, text):
"""Convert relative links to absolute URLs for RSS feed."""
def fix_relative_links(site_url, text):
    """Absolute links needed in RSS feed"""
    # TODO externalize links replacement configuration
    # Only date-prefixed image sources and post hrefs are rewritten; other
    # relative links are left untouched.
    rewrites = (
        ('src="/images/20', 'src="' + site_url + '/images/20'),
        ('href="/20', 'href="' + site_url + '/20'),
    )
    for relative, absolute in rewrites:
        text = text.replace(relative, absolute)
    return text
def _strip_html_tags(text):
"""Remove HTML tags from text."""
def clean_html_tag(text):
"""Remove HTML tags."""
while True:
original_text = text
text = re.sub(r"<\w+.*?>", "", text)
@ -182,15 +171,6 @@ def render(template, **params):
def get_header_list_value(header_name, page_params):
"""Extract and parse a space-separated list from a header value.
Args:
header_name: Name of the header to extract (e.g., 'category', 'tag')
page_params: Dict containing page parameters
Returns:
list: List of stripped string values from the header
"""
header_list = []
if header_name in page_params:
for s in page_params[header_name].split(" "):
@ -285,15 +265,7 @@ def _process_comments(page_params, stacosys_url, comment_layout,
return len(comments), comments_html, comment_section_html
def _get_friendly_date(date_str):
"""Convert date string to French-formatted readable date.
Args:
date_str: Date string in YYYY-MM-DD format
Returns:
str: French-formatted date (e.g., "15 janv. 2024")
"""
def get_friendly_date(date_str):
    """Return a short French-style date (e.g. "05 janv. 2024") for a yyyy-mm-dd string."""
    parsed = datetime.datetime.strptime(date_str, "%Y-%m-%d")
    # FRENCH_MONTHS is the module-level list of abbreviated month names.
    return f"{parsed.day:02d} {FRENCH_MONTHS[parsed.month - 1]} {parsed.year}"
@ -334,7 +306,7 @@ def _setup_page_params(content, params):
page_params["header"] = ""
page_params["footer"] = ""
page_params["date_path"] = page_params["date"].replace("-", "/")
page_params["friendly_date"] = _get_friendly_date(page_params["date"])
page_params["friendly_date"] = get_friendly_date(page_params["date"])
page_params["year"] = page_params["date"].split("-")[0]
page_params["post_url"] = (
page_params["year"] + "/" + page_params["slug"] + "/"
@ -351,7 +323,7 @@ def make_posts(
for posix_path in Path(src).glob(src_pattern):
src_path = str(posix_path)
content = parse_post_file(src_path, params)
content = read_content(src_path, params)
# render text / summary for basic fields
content["content"] = render(content["content"], **params)
@ -404,7 +376,7 @@ def make_notes(
for posix_path in Path(src).glob(src_pattern):
src_path = str(posix_path)
content = parse_post_file(src_path, params)
content = read_content(src_path, params)
# render text / summary for basic fields
content["content"] = render(content["content"], **params)
@ -435,17 +407,7 @@ def make_list(
posts, dst, list_layout, item_layout,
header_layout, footer_layout, **params
):
"""Generate list page for a blog.
Args:
posts: List of post dictionaries to include in the list
dst: Destination path for the generated HTML file
list_layout: Template for the overall list page
item_layout: Template for individual list items
header_layout: Template for page header (None to skip)
footer_layout: Template for page footer (None to skip)
**params: Additional parameters for template rendering
"""
"""Generate list page for a blog."""
# header
if header_layout is None:
@ -485,16 +447,6 @@ def make_list(
def create_blog(page_layout, list_in_page_layout, params):
"""Create blog posts and paginated index pages.
Args:
page_layout: Template for individual pages
list_in_page_layout: Template for list pages wrapped in page layout
params: Global site parameters
Returns:
list: Sorted list of all post dictionaries (newest first)
"""
banner_layout = fread("layout/banner.html")
paging_layout = fread("layout/paging.html")
post_layout = fread("layout/post.html")
@ -557,14 +509,6 @@ def create_blog(page_layout, list_in_page_layout, params):
def generate_categories(list_in_page_layout, item_nosummary_layout,
posts, params):
"""Generate category pages grouping posts by category.
Args:
list_in_page_layout: Template for list pages
item_nosummary_layout: Template for list items without summaries
posts: List of all blog posts
params: Global site parameters
"""
category_title_layout = fread("layout/category_title.html")
cat_post = {}
for post in posts:
@ -588,15 +532,6 @@ def generate_categories(list_in_page_layout, item_nosummary_layout,
def generate_archives(blog_posts, list_in_page_layout, item_nosummary_layout,
archive_title_layout, params):
"""Generate archives page with all blog posts.
Args:
blog_posts: List of all blog posts
list_in_page_layout: Template for list pages
item_nosummary_layout: Template for list items without summaries
archive_title_layout: Template for archive page header
params: Global site parameters
"""
make_list(
blog_posts,
"_site/archives/index.html",
@ -610,14 +545,6 @@ def generate_archives(blog_posts, list_in_page_layout, item_nosummary_layout,
def generate_notes(page_layout, archive_title_layout,
list_in_page_layout, params):
"""Generate notes pages and notes index.
Args:
page_layout: Template for individual pages
archive_title_layout: Template for notes index header
list_in_page_layout: Template for list pages
params: Global site parameters
"""
note_layout = fread("layout/note.html")
item_note_layout = fread("layout/item_note.html")
note_layout = render(page_layout, content=note_layout)
@ -642,12 +569,6 @@ def generate_notes(page_layout, archive_title_layout,
def generate_rss_feeds(posts, params):
"""Generate RSS feeds: main feed and per-tag feeds.
Args:
posts: List of all blog posts
params: Global site parameters
"""
rss_xml = fread("layout/rss.xml")
rss_item_xml = fread("layout/rss_item.xml")
@ -685,12 +606,6 @@ def generate_rss_feeds(posts, params):
def generate_sitemap(posts, params):
"""Generate XML sitemap for all posts.
Args:
posts: List of all blog posts
params: Global site parameters
"""
sitemap_xml = fread("layout/sitemap.xml")
sitemap_item_xml = fread("layout/sitemap_item.xml")
make_list(
@ -705,14 +620,6 @@ def generate_sitemap(posts, params):
def get_params(param_file):
"""Load site parameters from JSON file with defaults.
Args:
param_file: Path to JSON parameters file
Returns:
dict: Site parameters with defaults and loaded values
"""
# Default parameters.
params = {
"title": "Blog",
@ -729,24 +636,18 @@ def get_params(param_file):
return params
def rebuild_site_directory():
"""Remove existing _site directory and recreate from static files."""
def clean_site():
    """Delete any previous _site output and recreate it from the static files."""
    site_dir = "_site"
    if os.path.isdir(site_dir):
        shutil.rmtree(site_dir)
    # copytree requires the destination to not exist, hence the removal above.
    shutil.copytree("static", site_dir)
def main(param_file):
"""Main entry point for static site generation.
Args:
param_file: Path to JSON parameters file
"""
params = get_params(param_file)
# Create a new _site directory from scratch.
rebuild_site_directory()
clean_site()
# Load layouts.
page_layout = fread("layout/page.html")

View file

@ -14,12 +14,7 @@ def fread(filename):
return f.read()
def get_comment_count():
"""Fetch the total number of comments from Stacosys API.
Returns:
int: Total comment count, or 0 if request fails
"""
def get_nb_of_comments():
req_url = params["stacosys_url"] + "/comments/count"
query_params = dict(
token=params["stacosys_token"]
@ -28,8 +23,7 @@ def get_comment_count():
return 0 if not resp.ok else int(resp.json()["count"])
def _exit_program():
"""Exit the program with status code 0."""
def exit_program():
    """Terminate the process, reporting success (exit status 0)."""
    # Equivalent to sys.exit(0): sys.exit simply raises SystemExit.
    raise SystemExit(0)
@ -45,14 +39,14 @@ if os.path.isfile("params.json"):
params.update(json.loads(fread("params.json")))
external_check_cmd = params["external_check"]
initial_count = get_comment_count()
initial_count = get_nb_of_comments()
print(f"Comments = {initial_count}")
while True:
# check number of comments every 60 seconds
for _ in range(15):
time.sleep(60)
if initial_count != get_comment_count():
_exit_program()
if initial_count != get_nb_of_comments():
exit_program()
# check if git repo changed every 15 minutes
if external_check_cmd and os.system(external_check_cmd):
_exit_program()
exit_program()

View file

@ -16,7 +16,4 @@ dependencies = [
[dependency-groups]
dev = [
"black>=24.10.0",
"mypy>=1.19.1",
"types-pygments>=2.19.0.20251121",
"types-requests>=2.32.4.20260107",
]