Compare commits: gagb/add-g...v0.0.1a3

86 commits:

3ce21a47ab, 9518c01d4e, 22504551ef, 95188a4a27, e69d012b86, 03a7843a0a,
248d64edd0, ad5d4fb139, ad29122592, 898bfd4774, c8980d9f41, 24b52b2b8f,
09159aa04e, 77f620b568, 825d3bbb77, c0127af120, 33cb5015eb, cf13b7e657,
874eba6265, c3fa2934b9, 736e7d9a7e, 19c111251b, 360c2dd95f, 87846cf5f8,
33638f1fe6, 73776b2c0f, 2d3ffeade1, 51c1453699, ae4669107c, b0115cf971,
5cf8474f37, 83dc81170b, e7a2e20d93, 980abd3a60, 6587e0f097, 978c8763aa,
e7636656d8, ddc1bebea4, fa1f496d51, da779dd125, 12ce5e95b2, 6dad1cca96,
9e6a19987b, ed91e8b534, aeff2cb5ae, c9c7d98d30, e7d9b5546a, ed651aeb16,
3d9f3f3e5b, ad01da308d, 010f841008, 5fc03b6415, 013b022427, 695100d5d8,
d66ef5fcca, c168703d5e, 3548c96dd3, 1559d9d163, b7f5662ffd, 0a7203b876,
0704b0b6ff, 0dd4e95584, 93130b5ba5, 52b723724c, a55c3d525c, 81e3f24acd,
b84294620a, 60c495d609, 71123a4df3, 5753e553fe, 752dd897b9, 1aa4abe90f,
ea7c6dcc40, a31c0a13e7, 30ab78fe9e, 559b1fc62a, df03382218, 18301edcd0,
4987201ef6, 571c5bbc0e, e8ea8b6f3d, 7e634acf5f, 862c39029e, 33ce17954d,
6ebef5af0c, 3f9ba06418

.dockerignore (new file, 1 line)

@@ -0,0 +1 @@
+*

.gitattributes (vendored, new file, 1 line)

@@ -0,0 +1 @@
+tests/test_files/** linguist-vendored

Dockerfile (new file, 16 lines)

@@ -0,0 +1,16 @@
+FROM python:3.13-alpine
+
+USER root
+
+# Runtime dependency
+RUN apk add --no-cache ffmpeg
+
+RUN pip install markitdown
+
+# Default USERID and GROUPID
+ARG USERID=10000
+ARG GROUPID=10000
+
+USER $USERID:$GROUPID
+
+ENTRYPOINT [ "markitdown" ]

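Note on the Dockerfile: `ffmpeg` is installed as root (pydub needs it at runtime, see the import guard in `_markitdown.py` below), after which the image drops to an unprivileged user. `USERID` and `GROUPID` default to 10000 and can be overridden at build time with `--build-arg` when the container must match a host account.
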
README.md

@@ -1,5 +1,7 @@
 # MarkItDown
+
+[![PyPI](https://img.shields.io/pypi/v/markitdown.svg)](https://pypi.org/project/markitdown/)
 
 The MarkItDown library is a utility tool for converting various files to Markdown (e.g., for indexing, text analysis, etc.)
 
 It presently supports:
@@ -12,7 +14,23 @@ It presently supports:
 - Audio (EXIF metadata, and speech transcription)
 - HTML (special handling of Wikipedia, etc.)
 - Various other text-based formats (csv, json, xml, etc.)
+- ZIP (Iterates over contents and converts each file)
+
+# Installation
+
+You can install `markitdown` using pip:
+
+```sh
+pip install markitdown
+```
+
+or from the source
+
+```sh
+pip install -e .
+```
+
+# Usage
 
 The API is simple:
 
 ```python
@@ -23,6 +41,44 @@ result = markitdown.convert("test.xlsx")
 print(result.text_content)
 ```
 
+To use this as a command-line utility, install it and then run it like this:
+
+```bash
+markitdown path-to-file.pdf
+```
+
+This will output Markdown to standard output. You can save it like this:
+
+```bash
+markitdown path-to-file.pdf > document.md
+```
+
+You can pipe content to standard input by omitting the argument:
+
+```bash
+cat path-to-file.pdf | markitdown
+```
+
+You can also configure markitdown to use Large Language Models to describe images. To do so you must provide `llm_client` and `llm_model` parameters to the MarkItDown object, according to your specific client.
+
+```python
+from markitdown import MarkItDown
+from openai import OpenAI
+
+client = OpenAI()
+md = MarkItDown(llm_client=client, llm_model="gpt-4o")
+result = md.convert("example.jpg")
+print(result.text_content)
+```
+
+You can also use the project as a Docker image:
+
+```sh
+docker build -t markitdown:latest .
+docker run --rm -i markitdown:latest < ~/your-file.pdf > output.md
+```
+
 ## Contributing
 
 This project welcomes contributions and suggestions. Most contributions require you to agree to a
@@ -37,6 +93,24 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
 For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
 contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
 
+### Running Tests
+
+To run tests, install `hatch` using `pip` or other methods as described [here](https://hatch.pypa.io/dev/install).
+
+```sh
+pip install hatch
+hatch shell
+hatch test
+```
+
+### Running Pre-commit Checks
+
+Please run the pre-commit checks before submitting a PR.
+
+```sh
+pre-commit run --all-files
+```
+
 ## Trademarks
 
 This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft

pyproject.toml

@@ -38,7 +38,8 @@ dependencies = [
   "youtube-transcript-api",
   "SpeechRecognition",
   "pathvalidate",
-  "pygithub"
+  "charset-normalizer",
+  "openai",
 ]
 
 [project.urls]
@@ -77,3 +78,6 @@ exclude_lines = [
   "if __name__ == .__main__.:",
   "if TYPE_CHECKING:",
 ]
+
+[tool.hatch.build.targets.sdist]
+only-include = ["src/markitdown"]

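These dependency edits track the code changes below: `pygithub` leaves with the removed GitHub issue converter, `charset-normalizer` backs the new encoding detection in `PlainTextConverter`, and `openai` is what the README's LLM image-description example and the new `test_markitdown_llm` test use as the client.
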
src/markitdown/__about__.py

@@ -1,4 +1,4 @@
 # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
 #
 # SPDX-License-Identifier: MIT
-__version__ = "0.0.1a1"
+__version__ = "0.0.1a3"

src/markitdown/__main__.py

@@ -2,21 +2,15 @@
 #
 # SPDX-License-Identifier: MIT
 import sys
+import argparse
 from ._markitdown import MarkItDown
 
 
 def main():
-    if len(sys.argv) == 1:
-        markitdown = MarkItDown()
-        result = markitdown.convert_stream(sys.stdin.buffer)
-        print(result.text_content)
-    elif len(sys.argv) == 2:
-        markitdown = MarkItDown()
-        result = markitdown.convert(sys.argv[1])
-        print(result.text_content)
-    else:
-        sys.stderr.write(
-            """
+    parser = argparse.ArgumentParser(
+        description="Convert various file formats to markdown.",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        usage="""
 SYNTAX:
 
     markitdown <OPTIONAL: FILENAME>
@@ -33,9 +27,20 @@ EXAMPLE:
 
     OR
 
     markitdown < example.pdf
-""".strip()
-            + "\n"
-        )
+""".strip(),
+    )
+
+    parser.add_argument("filename", nargs="?")
+    args = parser.parse_args()
+
+    if args.filename is None:
+        markitdown = MarkItDown()
+        result = markitdown.convert_stream(sys.stdin.buffer)
+        print(result.text_content)
+    else:
+        markitdown = MarkItDown()
+        result = markitdown.convert(args.filename)
+        print(result.text_content)
 
 
 if __name__ == "__main__":

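The CLI behavior is unchanged: no argument reads from stdin, one argument converts that file, and `argparse` now supplies `--help` output and argument validation in place of the hand-rolled `sys.argv` checks.
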
src/markitdown/_markitdown.py

@@ -12,8 +12,10 @@ import subprocess
 import sys
 import tempfile
 import traceback
+import zipfile
 from typing import Any, Dict, List, Optional, Union
 from urllib.parse import parse_qs, quote, unquote, urlparse, urlunparse
+from warnings import warn, resetwarnings, catch_warnings
 
 import mammoth
 import markdownify
@@ -26,15 +28,24 @@ import pptx
 import puremagic
 import requests
 from bs4 import BeautifulSoup
+from charset_normalizer import from_path
 
 # Optional Transcription support
 try:
-    import pydub
+    # Using warnings' catch_warnings to catch
+    # pydub's warning of ffmpeg or avconv missing
+    with catch_warnings(record=True) as w:
+        import pydub
+
+        if w:
+            raise ModuleNotFoundError
     import speech_recognition as sr
 
     IS_AUDIO_TRANSCRIPTION_CAPABLE = True
 except ModuleNotFoundError:
     pass
+finally:
+    resetwarnings()
 
 # Optional YouTube transcription support
 try:
@@ -44,14 +55,6 @@ try:
 except ModuleNotFoundError:
     pass
 
-# Optional GitHub issue support
-try:
-    from github import Github
-
-    IS_GITHUB_ISSUE_CAPABLE = True
-except ModuleNotFoundError:
-    IS_GITHUB_ISSUE_CAPABLE = False
-
 
 class _CustomMarkdownify(markdownify.MarkdownConverter):
     """
@@ -169,9 +172,7 @@ class PlainTextConverter(DocumentConverter):
         elif "text/" not in content_type.lower():
             return None
 
-        text_content = ""
-        with open(local_path, "rt", encoding="utf-8") as fh:
-            text_content = fh.read()
+        text_content = str(from_path(local_path).best())
         return DocumentConverterResult(
             title=None,
             text_content=text_content,
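The `PlainTextConverter` change above sniffs the file's encoding with `charset-normalizer` instead of assuming UTF-8. A minimal sketch of the API the new line relies on, run against the CP932 test file added further down (path assumed relative to the repo root):

```python
from charset_normalizer import from_path

# from_path() reads the file and ranks candidate encodings.
matches = from_path("tests/test_files/test_mskanji.csv")
best = matches.best()  # highest-confidence CharsetMatch, or None if undecodable
print(best.encoding)   # e.g. "cp932" (a Shift-JIS superset)
print(str(best))       # the decoded text, which is what the converter keeps
```
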
@@ -352,8 +353,11 @@ class YouTubeConverter(DocumentConverter):
         assert isinstance(params["v"][0], str)
         video_id = str(params["v"][0])
         try:
+            youtube_transcript_languages = kwargs.get(
+                "youtube_transcript_languages", ("en",)
+            )
             # Must be a single transcript.
-            transcript = YouTubeTranscriptApi.get_transcript(video_id)  # type: ignore
+            transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=youtube_transcript_languages)  # type: ignore
             transcript_text = " ".join([part["text"] for part in transcript])  # type: ignore
             # Alternative formatting:
             # formatter = TextFormatter()
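Callers can now pick transcript languages per conversion; the tuple is passed straight through to `youtube-transcript-api`, which tries each language in order. A sketch (video URL hypothetical):

```python
from markitdown import MarkItDown

md = MarkItDown()
# Prefer a German transcript, fall back to English if none exists:
result = md.convert(
    "https://www.youtube.com/watch?v=VIDEO_ID",
    youtube_transcript_languages=("de", "en"),
)
print(result.text_content)
```
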
@@ -500,7 +504,9 @@ class DocxConverter(HtmlConverter):
 
         result = None
         with open(local_path, "rb") as docx_file:
-            result = mammoth.convert_to_html(docx_file)
+            style_map = kwargs.get("style_map", None)
+
+            result = mammoth.convert_to_html(docx_file, style_map=style_map)
             html_content = result.value
             result = self._convert(html_content)
 
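Because `style_map` arrives via kwargs, it can be set per call or once on the `MarkItDown` constructor (which forwards it, see the `_convert` change further down). The mapping below is the one this PR's tests use to surface DOCX comments:

```python
from markitdown import MarkItDown

md = MarkItDown()
result = md.convert(
    "tests/test_files/test_with_comment.docx",
    style_map="comment-reference => ",  # mammoth style map: keep comment references
)
print(result.text_content)
```
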
@@ -590,6 +596,10 @@ class PptxConverter(HtmlConverter):
                         "\n" + self._convert(html_table).text_content.strip() + "\n"
                     )
 
+                # Charts
+                if shape.has_chart:
+                    md_content += self._convert_chart_to_markdown(shape.chart)
+
                 # Text areas
                 elif shape.has_text_frame:
                     if shape == title:
@@ -624,6 +634,29 @@ class PptxConverter(HtmlConverter):
                 return True
         return False
 
+    def _convert_chart_to_markdown(self, chart):
+        md = "\n\n### Chart"
+        if chart.has_title:
+            md += f": {chart.chart_title.text_frame.text}"
+        md += "\n\n"
+        data = []
+        category_names = [c.label for c in chart.plots[0].categories]
+        series_names = [s.name for s in chart.series]
+        data.append(["Category"] + series_names)
+
+        for idx, category in enumerate(category_names):
+            row = [category]
+            for series in chart.series:
+                row.append(series.values[idx])
+            data.append(row)
+
+        markdown_table = []
+        for row in data:
+            markdown_table.append("| " + " | ".join(map(str, row)) + " |")
+        header = markdown_table[0]
+        separator = "|" + "|".join(["---"] * len(data[0])) + "|"
+        return md + "\n".join([header, separator] + markdown_table[1:])
+
 
 class MediaConverter(DocumentConverter):
     """
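For each chart, `_convert_chart_to_markdown` emits a `### Chart` heading (appending the chart title when one is set) followed by a pipe table: the first column holds the category labels and each remaining column holds one series' values, which is what the new `a3f6004b...` / `2003` assertions in `PPTX_TEST_STRINGS` check for.
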
@@ -762,7 +795,7 @@ class Mp3Converter(WavConverter):
 
 class ImageConverter(MediaConverter):
     """
-    Converts images to markdown via extraction of metadata (if `exiftool` is installed), OCR (if `easyocr` is installed), and description via a multimodal LLM (if an mlm_client is configured).
+    Converts images to markdown via extraction of metadata (if `exiftool` is installed), OCR (if `easyocr` is installed), and description via a multimodal LLM (if an llm_client is configured).
     """
 
     def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
@@ -792,17 +825,17 @@ class ImageConverter(MediaConverter):
                 md_content += f"{f}: {metadata[f]}\n"
 
         # Try describing the image with GPTV
-        mlm_client = kwargs.get("mlm_client")
-        mlm_model = kwargs.get("mlm_model")
-        if mlm_client is not None and mlm_model is not None:
+        llm_client = kwargs.get("llm_client")
+        llm_model = kwargs.get("llm_model")
+        if llm_client is not None and llm_model is not None:
             md_content += (
                 "\n# Description:\n"
-                + self._get_mlm_description(
+                + self._get_llm_description(
                     local_path,
                     extension,
-                    mlm_client,
-                    mlm_model,
-                    prompt=kwargs.get("mlm_prompt"),
+                    llm_client,
+                    llm_model,
+                    prompt=kwargs.get("llm_prompt"),
                 ).strip()
                 + "\n"
             )
@@ -812,12 +845,10 @@ class ImageConverter(MediaConverter):
             text_content=md_content,
         )
 
-    def _get_mlm_description(self, local_path, extension, client, model, prompt=None):
+    def _get_llm_description(self, local_path, extension, client, model, prompt=None):
         if prompt is None or prompt.strip() == "":
             prompt = "Write a detailed caption for this image."
 
-        sys.stderr.write(f"MLM Prompt:\n{prompt}\n")
-
         data_uri = ""
         with open(local_path, "rb") as image_file:
             content_type, encoding = mimetypes.guess_type("_dummy" + extension)
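The renamed kwargs can also be supplied per call rather than on the constructor, including `llm_prompt` to replace the default caption instruction. A sketch (image path hypothetical, client setup as in the README example):

```python
from markitdown import MarkItDown
from openai import OpenAI

md = MarkItDown()
result = md.convert(
    "example.jpg",
    llm_client=OpenAI(),
    llm_model="gpt-4o",
    llm_prompt="Write a one-sentence alt text for this image.",  # optional override
)
print(result.text_content)
```
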
@@ -845,126 +876,122 @@ class ImageConverter(MediaConverter):
         return response.choices[0].message.content
 
 
-class GitHubIssueConverter(DocumentConverter):
-    """Converts GitHub issues and pull requests to Markdown."""
-
-    def convert(self, github_url, github_token) -> Union[None, DocumentConverterResult]:
-        # Bail if not a valid GitHub issue or pull request URL
-        if github_url:
-            parsed_url = urlparse(github_url)
-            path_parts = parsed_url.path.strip("/").split("/")
-            if len(path_parts) < 4 or path_parts[2] not in ["issues", "pull"]:
-                return None
-
-            if not github_token:
-                raise ValueError(
-                    "GitHub token is not set. Cannot convert GitHub issue or pull request."
-                )
-
-            if path_parts[2] == "issues":
-                return self._convert_github_issue(github_url, github_token)
-            elif path_parts[2] == "pull":
-                return self._convert_github_pr(github_url, github_token)
-
-        return None
-
-    def _convert_github_issue(
-        self, issue_url: str, github_token: str
-    ) -> DocumentConverterResult:
-        """
-        Convert a GitHub issue to a markdown document.
-        Args:
-            issue_url (str): The URL of the GitHub issue to convert.
-            github_token (str): A GitHub token with access to the repository.
-        Returns:
-            DocumentConverterResult: The result containing the issue title and markdown content.
-        Raises:
-            ImportError: If the PyGithub library is not installed.
-            ValueError: If the provided URL is not a valid GitHub issue URL.
-        """
-        if not IS_GITHUB_ISSUE_CAPABLE:
-            raise ImportError(
-                "PyGithub is not installed. Please install it to use this feature."
-            )
-
-        # Parse the issue URL
-        parsed_url = urlparse(issue_url)
-        path_parts = parsed_url.path.strip("/").split("/")
-        if len(path_parts) < 4 or path_parts[2] != "issues":
-            raise ValueError("Invalid GitHub issue URL")
-
-        owner, repo, _, issue_number = path_parts[:4]
-
-        # Authenticate with GitHub
-        g = Github(github_token)
-        repo = g.get_repo(f"{owner}/{repo}")
-        issue = repo.get_issue(int(issue_number))
-
-        # Convert issue details to markdown
-        markdown_content = f"# {issue.title}\n\n{issue.body}\n\n"
-        markdown_content += f"**State:** {issue.state}\n"
-        markdown_content += f"**Created at:** {issue.created_at}\n"
-        markdown_content += f"**Updated at:** {issue.updated_at}\n"
-        markdown_content += f"**Comments:**\n"
-
-        for comment in issue.get_comments():
-            markdown_content += (
-                f"- {comment.user.login} ({comment.created_at}): {comment.body}\n"
-            )
-
-        return DocumentConverterResult(
-            title=issue.title,
-            text_content=markdown_content,
-        )
-
-    def _convert_github_pr(
-        self, pr_url: str, github_token: str
-    ) -> DocumentConverterResult:
-        """
-        Convert a GitHub pull request to a markdown document.
-        Args:
-            pr_url (str): The URL of the GitHub pull request to convert.
-            github_token (str): A GitHub token with access to the repository.
-        Returns:
-            DocumentConverterResult: The result containing the pull request title and markdown content.
-        Raises:
-            ImportError: If the PyGithub library is not installed.
-            ValueError: If the provided URL is not a valid GitHub pull request URL.
-        """
-        if not IS_GITHUB_ISSUE_CAPABLE:
-            raise ImportError(
-                "PyGithub is not installed. Please install it to use this feature."
-            )
-
-        # Parse the pull request URL
-        parsed_url = urlparse(pr_url)
-        path_parts = parsed_url.path.strip("/").split("/")
-        if len(path_parts) < 4 or path_parts[2] != "pull":
-            raise ValueError("Invalid GitHub pull request URL")
-
-        owner, repo, _, pr_number = path_parts[:4]
-
-        # Authenticate with GitHub
-        g = Github(github_token)
-        repo = g.get_repo(f"{owner}/{repo}")
-        pr = repo.get_pull(int(pr_number))
-
-        # Convert pull request details to markdown
-        markdown_content = f"# {pr.title}\n\n{pr.body}\n\n"
-        markdown_content += f"**State:** {pr.state}\n"
-        markdown_content += f"**Created at:** {pr.created_at}\n"
-        markdown_content += f"**Updated at:** {pr.updated_at}\n"
-        markdown_content += f"**Comments:**\n"
-
-        for comment in pr.get_issue_comments():
-            markdown_content += (
-                f"- {comment.user.login} ({comment.created_at}): {comment.body}\n"
-            )
-
-        return DocumentConverterResult(
-            title=pr.title,
-            text_content=markdown_content,
-        )
+class ZipConverter(DocumentConverter):
+    """Converts ZIP files to markdown by extracting and converting all contained files.
+
+    The converter extracts the ZIP contents to a temporary directory, processes each file
+    using appropriate converters based on file extensions, and then combines the results
+    into a single markdown document. The temporary directory is cleaned up after processing.
+
+    Example output format:
+    ```markdown
+    Content from the zip file `example.zip`:
+
+    ## File: docs/readme.txt
+
+    This is the content of readme.txt
+    Multiple lines are preserved
+
+    ## File: images/example.jpg
+
+    ImageSize: 1920x1080
+    DateTimeOriginal: 2024-02-15 14:30:00
+    Description: A beautiful landscape photo
+
+    ## File: data/report.xlsx
+
+    ## Sheet1
+    | Column1 | Column2 | Column3 |
+    |---------|---------|---------|
+    | data1   | data2   | data3   |
+    | data4   | data5   | data6   |
+    ```
+
+    Key features:
+    - Maintains original file structure in headings
+    - Processes nested files recursively
+    - Uses appropriate converters for each file type
+    - Preserves formatting of converted content
+    - Cleans up temporary files after processing
+    """
+
+    def convert(
+        self, local_path: str, **kwargs: Any
+    ) -> Union[None, DocumentConverterResult]:
+        # Bail if not a ZIP
+        extension = kwargs.get("file_extension", "")
+        if extension.lower() != ".zip":
+            return None
+
+        # Get parent converters list if available
+        parent_converters = kwargs.get("_parent_converters", [])
+        if not parent_converters:
+            return DocumentConverterResult(
+                title=None,
+                text_content=f"[ERROR] No converters available to process zip contents from: {local_path}",
+            )
+
+        extracted_zip_folder_name = (
+            f"extracted_{os.path.basename(local_path).replace('.zip', '_zip')}"
+        )
+        new_folder = os.path.normpath(
+            os.path.join(os.path.dirname(local_path), extracted_zip_folder_name)
+        )
+        md_content = f"Content from the zip file `{os.path.basename(local_path)}`:\n\n"
+
+        # Safety check for path traversal
+        if not new_folder.startswith(os.path.dirname(local_path)):
+            return DocumentConverterResult(
+                title=None, text_content=f"[ERROR] Invalid zip file path: {local_path}"
+            )
+
+        try:
+            # Extract the zip file
+            with zipfile.ZipFile(local_path, "r") as zipObj:
+                zipObj.extractall(path=new_folder)
+
+            # Process each extracted file
+            for root, dirs, files in os.walk(new_folder):
+                for name in files:
+                    file_path = os.path.join(root, name)
+                    relative_path = os.path.relpath(file_path, new_folder)
+
+                    # Get file extension
+                    _, file_extension = os.path.splitext(name)
+
+                    # Update kwargs for the file
+                    file_kwargs = kwargs.copy()
+                    file_kwargs["file_extension"] = file_extension
+                    file_kwargs["_parent_converters"] = parent_converters
+
+                    # Try converting the file using available converters
+                    for converter in parent_converters:
+                        # Skip the zip converter to avoid infinite recursion
+                        if isinstance(converter, ZipConverter):
+                            continue
+
+                        result = converter.convert(file_path, **file_kwargs)
+                        if result is not None:
+                            md_content += f"\n## File: {relative_path}\n\n"
+                            md_content += result.text_content + "\n\n"
+                            break
+
+            # Clean up extracted files if specified
+            if kwargs.get("cleanup_extracted", True):
+                shutil.rmtree(new_folder)
+
+            return DocumentConverterResult(title=None, text_content=md_content.strip())
+
+        except zipfile.BadZipFile:
+            return DocumentConverterResult(
+                title=None,
+                text_content=f"[ERROR] Invalid or corrupted zip file: {local_path}",
+            )
+        except Exception as e:
+            return DocumentConverterResult(
+                title=None,
+                text_content=f"[ERROR] Failed to process zip file {local_path}: {str(e)}",
+            )
 
 
 class FileConversionException(BaseException):
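Using the new converter is just `convert()` on a `.zip` path; each member is routed to whichever registered converter accepts it, since `_convert` injects the converter list as `_parent_converters`. A sketch against the test archive added in this PR:

```python
from markitdown import MarkItDown

md = MarkItDown()
result = md.convert("tests/test_files/test_files.zip")
print(result.text_content)  # one "## File: <relative path>" section per converted member

# Keep the extraction folder around for inspection (it is removed by default):
result = md.convert("tests/test_files/test_files.zip", cleanup_extracted=False)
```
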
@@ -982,16 +1009,50 @@ class MarkItDown:
     def __init__(
         self,
         requests_session: Optional[requests.Session] = None,
+        llm_client: Optional[Any] = None,
+        llm_model: Optional[str] = None,
+        style_map: Optional[str] = None,
+        # Deprecated
         mlm_client: Optional[Any] = None,
-        mlm_model: Optional[Any] = None,
+        mlm_model: Optional[str] = None,
     ):
         if requests_session is None:
             self._requests_session = requests.Session()
         else:
             self._requests_session = requests_session
 
-        self._mlm_client = mlm_client
-        self._mlm_model = mlm_model
+        # Handle deprecation notices
+        #############################
+        if mlm_client is not None:
+            if llm_client is None:
+                warn(
+                    "'mlm_client' is deprecated, and was renamed 'llm_client'.",
+                    DeprecationWarning,
+                )
+                llm_client = mlm_client
+                mlm_client = None
+            else:
+                raise ValueError(
+                    "'mlm_client' is deprecated, and was renamed 'llm_client'. Do not use both at the same time. Just use 'llm_client' instead."
+                )
+
+        if mlm_model is not None:
+            if llm_model is None:
+                warn(
+                    "'mlm_model' is deprecated, and was renamed 'llm_model'.",
+                    DeprecationWarning,
+                )
+                llm_model = mlm_model
+                mlm_model = None
+            else:
+                raise ValueError(
+                    "'mlm_model' is deprecated, and was renamed 'llm_model'. Do not use both at the same time. Just use 'llm_model' instead."
+                )
+        #############################
+
+        self._llm_client = llm_client
+        self._llm_model = llm_model
+        self._style_map = style_map
 
         self._page_converters: List[DocumentConverter] = []
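The old spellings keep working but are remapped with a warning, and mixing old and new is rejected, exactly as exercised by the new `test_markitdown_deprecation` test below. A minimal sketch:

```python
import warnings

from markitdown import MarkItDown

# Old kwarg still works, but emits a DeprecationWarning and is remapped:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    md = MarkItDown(mlm_model="gpt-4o")
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
assert md._llm_model == "gpt-4o"

# Mixing old and new spellings is an error:
try:
    MarkItDown(mlm_model="gpt-4o", llm_model="gpt-4o")
except ValueError as e:
    print(e)
```
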
@@ -1010,6 +1071,7 @@ class MarkItDown:
         self.register_page_converter(Mp3Converter())
         self.register_page_converter(ImageConverter())
         self.register_page_converter(PdfConverter())
+        self.register_page_converter(ZipConverter())
 
     def convert(
         self, source: Union[str, requests.Response], **kwargs: Any
@@ -1019,6 +1081,7 @@ class MarkItDown:
         - source: can be a string representing a path or url, or a requests.response object
         - extension: specifies the file extension to use when interpreting the file. If None, infer from source (path, uri, content-type, etc.)
         """
+
         # Local path or url
         if isinstance(source, str):
             if (
@@ -1033,28 +1096,6 @@ class MarkItDown:
         elif isinstance(source, requests.Response):
             return self.convert_response(source, **kwargs)
 
-    def convert_url(
-        self, url: str, **kwargs: Any
-    ) -> DocumentConverterResult:  # TODO: fix kwargs type
-        # Handle GitHub issue and pull request URLs directly
-        parsed_url = urlparse(url)
-        if parsed_url.hostname == "github.com" and any(
-            x in parsed_url.path for x in ["/issues/", "/pull/"]
-        ):
-            github_token = kwargs.get("github_token", os.getenv("GITHUB_TOKEN"))
-            if not github_token:
-                raise ValueError(
-                    "GitHub token is required for GitHub issue or pull request conversion."
-                )
-            return GitHubIssueConverter().convert(
-                github_url=url, github_token=github_token
-            )
-
-        # Send a HTTP request to the URL
-        response = self._requests_session.get(url, stream=True)
-        response.raise_for_status()
-        return self.convert_response(response, **kwargs)
-
     def convert_local(
         self, path: str, **kwargs: Any
     ) -> DocumentConverterResult:  # TODO: deal with kwargs
@@ -1109,6 +1150,14 @@ class MarkItDown:
 
         return result
 
+    def convert_url(
+        self, url: str, **kwargs: Any
+    ) -> DocumentConverterResult:  # TODO: fix kwargs type
+        # Send a HTTP request to the URL
+        response = self._requests_session.get(url, stream=True)
+        response.raise_for_status()
+        return self.convert_response(response, **kwargs)
+
     def convert_response(
         self, response: requests.Response, **kwargs: Any
     ) -> DocumentConverterResult:  # TODO fix kwargs type
@@ -1146,7 +1195,7 @@ class MarkItDown:
             self._append_ext(extensions, g)
 
             # Convert
-            result = self._convert(temp_path, extensions, url=response.url)
+            result = self._convert(temp_path, extensions, url=response.url, **kwargs)
         # Clean up
         finally:
             try:
@@ -1173,11 +1222,17 @@ class MarkItDown:
             _kwargs.update({"file_extension": ext})
 
             # Copy any additional global options
-            if "mlm_client" not in _kwargs and self._mlm_client is not None:
-                _kwargs["mlm_client"] = self._mlm_client
+            if "llm_client" not in _kwargs and self._llm_client is not None:
+                _kwargs["llm_client"] = self._llm_client
 
-            if "mlm_model" not in _kwargs and self._mlm_model is not None:
-                _kwargs["mlm_model"] = self._mlm_model
+            if "llm_model" not in _kwargs and self._llm_model is not None:
+                _kwargs["llm_model"] = self._llm_model
+
+            # Add the list of converters for nested processing
+            _kwargs["_parent_converters"] = self._page_converters
+
+            if "style_map" not in _kwargs and self._style_map is not None:
+                _kwargs["style_map"] = self._style_map
 
             # If we hit an error log it and keep trying
             try:
@@ -1214,8 +1269,7 @@ class MarkItDown:
         if ext == "":
             return
         # if ext not in extensions:
-        if True:
-            extensions.append(ext)
+        extensions.append(ext)
 
     def _guess_ext_magic(self, path):
         """Use puremagic (a Python implementation of libmagic) to guess a file's extension based on the first few bytes."""
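A consequence of forwarding `**kwargs` through `convert_response` is that per-call options now apply to remote sources too, not just local paths. A sketch (URL hypothetical):

```python
from markitdown import MarkItDown

md = MarkItDown()
# style_map (or the llm_* kwargs) now reach the converters even for URLs:
result = md.convert(
    "https://example.com/report.docx",
    style_map="comment-reference => ",
)
print(result.text_content)
```
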
Changed test files:

tests/test_files/test.docx (vendored): mode changed, Executable file → Normal file
tests/test_files/test.jpg (vendored): mode changed, Executable file → Normal file (463 KiB, content unchanged)
tests/test_files/test.pptx (vendored): binary file changed; mode Executable file → Normal file
tests/test_files/test.xlsx (vendored): mode changed, Executable file → Normal file
tests/test_files/test_files.zip (vendored): new binary file
tests/test_files/test_llm.jpg (vendored): new binary file (145 KiB)

tests/test_files/test_mskanji.csv (vendored, new file, 4 lines; CP932-encoded on disk, shown decoded)

@@ -0,0 +1,4 @@
+名前,年齢,住所
+佐藤太郎,30,東京
+三木英子,25,大阪
+髙橋淳,35,名古屋

tests/test_files/test_with_comment.docx (vendored): new binary file

tests/test_markitdown.py
@@ -6,11 +6,23 @@ import shutil
 import pytest
 import requests
 
+from warnings import catch_warnings, resetwarnings
+
 from markitdown import MarkItDown
 
 skip_remote = (
     True if os.environ.get("GITHUB_ACTIONS") else False
 )  # Don't run these tests in CI
 
+
+# Don't run the llm tests without a key and the client library
+skip_llm = False if os.environ.get("OPENAI_API_KEY") else True
+try:
+    import openai
+except ModuleNotFoundError:
+    skip_llm = True
+
+# Skip exiftool tests if not installed
 skip_exiftool = shutil.which("exiftool") is None
 
 TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")
@@ -51,12 +63,25 @@ DOCX_TEST_STRINGS = [
     "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
 ]
 
+DOCX_COMMENT_TEST_STRINGS = [
+    "314b0a30-5b04-470b-b9f7-eed2c2bec74a",
+    "49e168b7-d2ae-407f-a055-2167576f39a1",
+    "## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
+    "# Abstract",
+    "# Introduction",
+    "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
+    "This is a test comment. 12df-321a",
+    "Yet another comment in the doc. 55yiyi-asd09",
+]
+
 PPTX_TEST_STRINGS = [
     "2cdda5c8-e50e-4db4-b5f0-9722a649f455",
     "04191ea8-5c73-4215-a1d3-1cfb43aaaf12",
     "44bf7d06-5e7a-4a40-a2e1-a2e42ef28c8a",
     "1b92870d-e3b5-4e65-8153-919f4ff45592",
     "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
+    "a3f6004b-6f4f-4ea8-bee3-3741f4dc385f",  # chart title
+    "2003",  # chart value
 ]
 
 BLOG_TEST_URL = "https://microsoft.github.io/autogen/blog/2023/04/21/LLM-tuning-math"
@@ -87,9 +112,16 @@ SERP_TEST_EXCLUDES = [
     "data:image/svg+xml,%3Csvg%20width%3D",
 ]
 
-GITHUB_ISSUE_URL = "https://github.com/microsoft/autogen/issues/1421"
-GITHUB_PR_URL = "https://github.com/microsoft/autogen/pull/194"
-GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN", "")
+CSV_CP932_TEST_STRINGS = [
+    "名前,年齢,住所",
+    "佐藤太郎,30,東京",
+    "三木英子,25,大阪",
+    "髙橋淳,35,名古屋",
+]
+
+LLM_TEST_STRINGS = [
+    "5bda1dd6",
+]
 
 
 @pytest.mark.skipif(
@@ -134,6 +166,24 @@ def test_markitdown_local() -> None:
         text_content = result.text_content.replace("\\", "")
         assert test_string in text_content
 
+    # Test DOCX processing, with comments
+    result = markitdown.convert(
+        os.path.join(TEST_FILES_DIR, "test_with_comment.docx"),
+        style_map="comment-reference => ",
+    )
+    for test_string in DOCX_COMMENT_TEST_STRINGS:
+        text_content = result.text_content.replace("\\", "")
+        assert test_string in text_content
+
+    # Test DOCX processing, with comments and setting style_map on init
+    markitdown_with_style_map = MarkItDown(style_map="comment-reference => ")
+    result = markitdown_with_style_map.convert(
+        os.path.join(TEST_FILES_DIR, "test_with_comment.docx")
+    )
+    for test_string in DOCX_COMMENT_TEST_STRINGS:
+        text_content = result.text_content.replace("\\", "")
+        assert test_string in text_content
+
     # Test PPTX processing
     result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.pptx"))
     for test_string in PPTX_TEST_STRINGS:
@@ -148,6 +198,12 @@ def test_markitdown_local() -> None:
         text_content = result.text_content.replace("\\", "")
         assert test_string in text_content
 
+    # Test ZIP file processing
+    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_files.zip"))
+    for test_string in DOCX_TEST_STRINGS:
+        text_content = result.text_content.replace("\\", "")
+        assert test_string in text_content
+
     # Test Wikipedia processing
     result = markitdown.convert(
         os.path.join(TEST_FILES_DIR, "test_wikipedia.html"), url=WIKIPEDIA_TEST_URL
@@ -168,6 +224,12 @@ def test_markitdown_local() -> None:
     for test_string in SERP_TEST_STRINGS:
         assert test_string in text_content
 
+    ## Test non-UTF-8 encoding
+    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_mskanji.csv"))
+    text_content = result.text_content.replace("\\", "")
+    for test_string in CSV_CP932_TEST_STRINGS:
+        assert test_string in text_content
+
 
 @pytest.mark.skipif(
     skip_exiftool,
@@ -183,28 +245,57 @@ def test_markitdown_exiftool() -> None:
     assert target in result.text_content
 
 
-@pytest.mark.skipif(
-    not GITHUB_TOKEN,
-    reason="GitHub token not provided",
-)
-def test_markitdown_github_issue() -> None:
-    markitdown = MarkItDown()
-    result = markitdown.convert(GITHUB_ISSUE_URL, github_token=GITHUB_TOKEN)
-    print(result.text_content)
-    assert "User-Defined Functions" in result.text_content
-    assert "closed" in result.text_content
-    assert "Comments:" in result.text_content
+def test_markitdown_deprecation() -> None:
+    try:
+        with catch_warnings(record=True) as w:
+            test_client = object()
+            markitdown = MarkItDown(mlm_client=test_client)
+            assert len(w) == 1
+            assert w[0].category is DeprecationWarning
+            assert markitdown._llm_client == test_client
+    finally:
+        resetwarnings()
+
+    try:
+        with catch_warnings(record=True) as w:
+            markitdown = MarkItDown(mlm_model="gpt-4o")
+            assert len(w) == 1
+            assert w[0].category is DeprecationWarning
+            assert markitdown._llm_model == "gpt-4o"
+    finally:
+        resetwarnings()
+
+    try:
+        test_client = object()
+        markitdown = MarkItDown(mlm_client=test_client, llm_client=test_client)
+        assert False
+    except ValueError:
+        pass
+
+    try:
+        markitdown = MarkItDown(mlm_model="gpt-4o", llm_model="gpt-4o")
+        assert False
+    except ValueError:
+        pass
 
 
 @pytest.mark.skipif(
-    not GITHUB_TOKEN,
-    reason="GitHub token not provided",
+    skip_llm,
+    reason="do not run llm tests without a key",
 )
-def test_markitdown_github_pr() -> None:
-    markitdown = MarkItDown()
-    result = markitdown.convert(GITHUB_PR_URL, github_token=GITHUB_TOKEN)
-    print(result.text_content)
-    assert "faq" in result.text_content
+def test_markitdown_llm() -> None:
+    client = openai.OpenAI()
+    markitdown = MarkItDown(llm_client=client, llm_model="gpt-4o")
+
+    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_llm.jpg"))
+
+    for test_string in LLM_TEST_STRINGS:
+        assert test_string in result.text_content
+
+    # This is not super precise. It would also accept "red square", "blue circle",
+    # "the square is not blue", etc. But it's sufficient for this test.
+    for test_string in ["red", "circle", "blue", "square"]:
+        assert test_string in result.text_content.lower()
 
 
 if __name__ == "__main__":
@@ -212,5 +303,5 @@ if __name__ == "__main__":
     test_markitdown_remote()
     test_markitdown_local()
     test_markitdown_exiftool()
-    test_markitdown_github_issue()
-    test_markitdown_github_pr()
+    test_markitdown_deprecation()
+    test_markitdown_llm()