#18719 Update HUB alt text

Merged
Glenn Jocher merged 1 commit into Ultralytics:main from ultralytics:glenn-jocher-patch-1
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
Automates the building and post-processing of MkDocs documentation, particularly for projects with multilingual content.

It streamlines the workflow for generating localized versions of the documentation and updating HTML links to ensure
they are correctly formatted.

Key Features:
    - Automated building of MkDocs documentation: The script compiles both the main documentation and
      any localized versions specified in separate MkDocs configuration files.
    - Post-processing of generated HTML files: After the documentation is built, the script updates all
      HTML files to remove the '.md' extension from internal links. This ensures that links in the built
      HTML documentation correctly point to other HTML pages rather than Markdown files, which is crucial
      for proper navigation within the web-based documentation.

Usage:
    - Run the script from the root directory of your MkDocs project.
    - Ensure that MkDocs is installed and that all MkDocs configuration files (main and localized versions)
      are present in the project directory.
    - The script first builds the documentation using MkDocs, then scans the generated HTML files in the 'site'
      directory to update the internal links.
    - It's ideal for projects where the documentation is written in Markdown and needs to be served as a static website.

Note:
    - This script is built to be run in an environment where Python and MkDocs are installed and properly configured.
"""
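
# Example invocation (illustrative; assumes this script is saved as docs/build_docs.py
# in the repo and that mkdocs plus the docs requirements are installed):
#   python docs/build_docs.py
# The built site is written to the 'site' directory next to 'docs' (see SITE below).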

import json
import os
import re
import shutil
import subprocess
from pathlib import Path

from bs4 import BeautifulSoup
from tqdm import tqdm

os.environ["JUPYTER_PLATFORM_DIRS"] = "1"  # fix DeprecationWarning: Jupyter is migrating to use standard platformdirs

DOCS = Path(__file__).parent.resolve()
SITE = DOCS.parent / "site"


def create_vercel_config():
    """Create vercel.json in the site directory with customized configuration settings."""
    config = {"trailingSlash": True}
    with open(SITE / "vercel.json", "w") as f:
        json.dump(config, f, indent=2)
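
# For reference, the vercel.json written above contains:
#   {
#     "trailingSlash": true
#   }
# With trailingSlash enabled, Vercel serves pages at trailing-slash URLs
# (e.g. /quickstart/ rather than /quickstart), matching MkDocs' directory-style output.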


def prepare_docs_markdown(clone_repos=True):
    """Build docs using mkdocs."""
    if SITE.exists():
        print(f"Removing existing {SITE}")
        shutil.rmtree(SITE)

    # Get hub-sdk repo
    if clone_repos:
        repo = "https://github.com/ultralytics/hub-sdk"
        local_dir = DOCS.parent / Path(repo).name
        if not local_dir.exists():
            os.system(f"git clone {repo} {local_dir}")
        os.system(f"git -C {local_dir} pull")  # update repo
        shutil.rmtree(DOCS / "en/hub/sdk", ignore_errors=True)  # delete if exists
        shutil.copytree(local_dir / "docs", DOCS / "en/hub/sdk")  # for docs
        shutil.rmtree(DOCS.parent / "hub_sdk", ignore_errors=True)  # delete if exists
        shutil.copytree(local_dir / "hub_sdk", DOCS.parent / "hub_sdk")  # for mkdocstrings
        print(f"Cloned/Updated {repo} in {local_dir}")

    # Add frontmatter
    for file in tqdm((DOCS / "en").rglob("*.md"), desc="Adding frontmatter"):
        update_markdown_files(file)
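
# Resulting layout (derived from the clone/copy calls above; <repo> is the
# directory containing this docs folder):
#   <repo>/hub-sdk/           cloned https://github.com/ultralytics/hub-sdk
#   <repo>/hub_sdk/           package copy consumed by mkdocstrings
#   <repo>/docs/en/hub/sdk/   hub-sdk markdown merged into the main docs tree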


def update_page_title(file_path: Path, new_title: str):
    """Update the title of an HTML file."""
    # Read the content of the file
    with open(file_path, encoding="utf-8") as file:
        content = file.read()

    # Replace the existing title with the new title
    updated_content = re.sub(r"<title>.*?</title>", f"<title>{new_title}</title>", content)

    # Write the updated content back to the file
    with open(file_path, "w", encoding="utf-8") as file:
        file.write(updated_content)


def update_html_head(script=""):
    """Update the HTML head section of each file."""
    html_files = Path(SITE).rglob("*.html")
    for html_file in tqdm(html_files, desc="Processing HTML files"):
        with html_file.open("r", encoding="utf-8") as file:
            html_content = file.read()

        if script in html_content:  # script already in HTML file
            return

        head_end_index = html_content.lower().rfind("</head>")
        if head_end_index != -1:
            # Add the specified JavaScript to the HTML file just before the end of the head tag.
            new_html_content = html_content[:head_end_index] + script + html_content[head_end_index:]
            with html_file.open("w", encoding="utf-8") as file:
                file.write(new_html_content)
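
# Illustrative example: update_html_head('<script src="analytics.js"></script>')
# rewrites '...<title>Page</title></head>' to
# '...<title>Page</title><script src="analytics.js"></script></head>',
# injecting the snippet just before the closing </head> tag.
# (analytics.js is a placeholder name, not a real asset in this project.)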


def update_subdir_edit_links(subdir="", docs_url=""):
    """Update the 'Edit this page' links of HTML files in a subdirectory to point at the given docs URL."""
    if str(subdir[0]) == "/":
        subdir = subdir[1:]  # strip leading slash

    html_files = (SITE / subdir).rglob("*.html")
    for html_file in tqdm(html_files, desc="Processing subdir files"):
        with html_file.open("r", encoding="utf-8") as file:
            soup = BeautifulSoup(file, "html.parser")

        # Find the anchor tag and update its href attribute
        a_tag = soup.find("a", {"class": "md-content__button md-icon"})
        if a_tag and a_tag["title"] == "Edit this page":
            a_tag["href"] = f"{docs_url}{a_tag['href'].split(subdir)[-1]}"

        # Write the updated HTML back to the file
        with open(html_file, "w", encoding="utf-8") as file:
            file.write(str(soup))


def update_markdown_files(md_filepath: Path):
    """Creates or updates a Markdown file, ensuring frontmatter is present."""
    if md_filepath.exists():
        content = md_filepath.read_text().strip()

        # Replace curly apostrophes with straight ones
        content = content.replace("‘", "'").replace("’", "'")

        # Add frontmatter if missing
        if not content.strip().startswith("---\n") and "macros" not in md_filepath.parts:  # skip macros directory
            header = "---\ncomments: true\ndescription: TODO ADD DESCRIPTION\nkeywords: TODO ADD KEYWORDS\n---\n\n"
            content = header + content

        # Ensure MkDocs content-tab "=== " lines are preceded and followed by empty newlines
        lines = content.split("\n")
        new_lines = []
        for i, line in enumerate(lines):
            stripped_line = line.strip()
            if stripped_line.startswith("=== "):
                if i > 0 and new_lines[-1] != "":
                    new_lines.append("")
                new_lines.append(line)
                if i < len(lines) - 1 and lines[i + 1].strip() != "":
                    new_lines.append("")
            else:
                new_lines.append(line)
        content = "\n".join(new_lines)

        # Add EOF newline if missing
        if not content.endswith("\n"):
            content += "\n"

        # Save page
        md_filepath.write_text(content)
    return
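
# For reference, the frontmatter header added above renders at the top of a
# page's Markdown source as:
#   ---
#   comments: true
#   description: TODO ADD DESCRIPTION
#   keywords: TODO ADD KEYWORDS
#   ---
# leaving the description/keywords placeholders for authors to fill in.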


def update_docs_html():
    """Updates titles, edit links, head sections, and converts plaintext links in HTML documentation."""
    # Update 404 titles
    update_page_title(SITE / "404.html", new_title="Ultralytics Docs - Not Found")

    # Update edit links
    update_subdir_edit_links(
        subdir="hub/sdk/",  # do not use leading slash
        docs_url="https://github.com/ultralytics/hub-sdk/tree/main/docs/",
    )

    # Convert plaintext links to HTML hyperlinks
    files_modified = 0
    for html_file in tqdm(SITE.rglob("*.html"), desc="Converting plaintext links"):
        with open(html_file, encoding="utf-8") as file:
            content = file.read()
        updated_content = convert_plaintext_links_to_html(content)
        if updated_content != content:
            with open(html_file, "w", encoding="utf-8") as file:
                file.write(updated_content)
            files_modified += 1
    print(f"Modified plaintext links in {files_modified} files.")

    # Update HTML file head section
    script = ""
    if any(script):
        update_html_head(script)

    # Delete the /macros directory from the built site
    macros_dir = SITE / "macros"
    if macros_dir.exists():
        print(f"Removing /macros directory from site: {macros_dir}")
        shutil.rmtree(macros_dir)


def convert_plaintext_links_to_html(content):
    """Convert plaintext links to HTML hyperlinks in the main content area only."""
    soup = BeautifulSoup(content, "html.parser")

    # Find the main content area (adjust this selector based on your HTML structure)
    main_content = soup.find("main") or soup.find("div", class_="md-content")
    if not main_content:
        return content  # Return original content if main content area not found

    modified = False
    for paragraph in main_content.find_all(["p", "li"]):  # Focus on paragraphs and list items
        for text_node in paragraph.find_all(string=True, recursive=False):
            if text_node.parent.name not in {"a", "code"}:  # Ignore links and code blocks
                new_text = re.sub(
                    r"(https?://[^\s()<>]*[^\s()<>.,:;!?\'\"])",
                    r'<a href="\1">\1</a>',
                    str(text_node),
                )
                if "<a href=" in new_text:
                    # Parse the new text with BeautifulSoup to handle HTML properly
                    new_soup = BeautifulSoup(new_text, "html.parser")
                    text_node.replace_with(new_soup)
                    modified = True

    return str(soup) if modified else content
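
# Illustrative example: given the fragment
#   <main><p>Docs live at https://docs.ultralytics.com now.</p></main>
# the function returns
#   <main><p>Docs live at <a href="https://docs.ultralytics.com">https://docs.ultralytics.com</a> now.</p></main>
# The trailing character class in the regex keeps punctuation such as the
# final '.' out of the captured URL.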


def remove_macros():
    """Removes the /macros directory and related entries in sitemap.xml from the built site."""
    shutil.rmtree(SITE / "macros", ignore_errors=True)
    (SITE / "sitemap.xml.gz").unlink(missing_ok=True)

    # Process sitemap.xml
    sitemap = SITE / "sitemap.xml"
    lines = sitemap.read_text(encoding="utf-8").splitlines(keepends=True)

    # Find indices of '/macros/' lines
    macros_indices = [i for i, line in enumerate(lines) if "/macros/" in line]

    # Create a set of indices to remove (including lines before and after)
    indices_to_remove = set()
    for i in macros_indices:
        indices_to_remove.update(range(i - 1, i + 3))  # i-1, i, i+1, i+2

    # Create new list of lines, excluding the ones to remove
    new_lines = [line for i, line in enumerate(lines) if i not in indices_to_remove]

    # Write the cleaned content back to the file
    sitemap.write_text("".join(new_lines), encoding="utf-8")
    print(f"Removed {len(macros_indices)} URLs containing '/macros/' from {sitemap}")
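
# Why range(i - 1, i + 3): the sitemap entries this targets span four lines,
# e.g. (URL and date illustrative):
#   <url>
#        <loc>https://docs.ultralytics.com/macros/</loc>
#        <lastmod>2024-01-01</lastmod>
#   </url>
# so removing the line before and the two lines after the matched <loc>
# deletes the whole <url> element.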


def minify_files(html=True, css=True, js=True):
    """Minifies HTML, CSS, and JS files and prints total reduction stats."""
    minify, compress, jsmin = None, None, None
    try:
        if html:
            from minify_html import minify
        if css:
            from csscompressor import compress
        if js:
            import jsmin
    except ImportError as e:
        print(f"Missing required package: {str(e)}")
        return

    stats = {}
    for ext, minifier in {
        "html": (lambda x: minify(x, keep_closing_tags=True, minify_css=True, minify_js=True)) if html else None,
        "css": compress if css else None,
        "js": jsmin.jsmin if js else None,
    }.items():
        if not minifier:
            continue

        stats[ext] = {"original": 0, "minified": 0}
        directory = ""  # "stylesheets" if ext == "css" else "javascript" if ext == "js" else ""
        for f in tqdm((SITE / directory).rglob(f"*.{ext}"), desc=f"Minifying {ext.upper()}"):
            content = f.read_text(encoding="utf-8")
            minified = minifier(content)
            stats[ext]["original"] += len(content)
            stats[ext]["minified"] += len(minified)
            f.write_text(minified, encoding="utf-8")

    for ext, data in stats.items():
        if data["original"]:
            r = data["original"] - data["minified"]  # reduction
            print(f"Total {ext.upper()} reduction: {(r / data['original']) * 100:.2f}% ({r / 1024:.2f} KB saved)")


def main():
    """Builds docs, updates titles and edit links, minifies HTML, and prints local server command."""
    prepare_docs_markdown()

    # Build the main documentation
    print(f"Building docs from {DOCS}")
    subprocess.run(f"mkdocs build -f {DOCS.parent}/mkdocs.yml --strict", check=True, shell=True)
    remove_macros()
    create_vercel_config()
    print(f"Site built at {SITE}")

    # Update docs HTML pages
    update_docs_html()

    # Minify files
    minify_files(html=False, css=False, js=False)

    # Show command to serve built website
    print('Docs built correctly ✅\nServe site at http://localhost:8000 with "python -m http.server --directory site"')


if __name__ == "__main__":
    main()