Merge branch 'main' into patch-2

.dockerignore (new file, +1)
@@ -0,0 +1 @@
+*

.gitattributes (new file, vendored, +1)
@@ -0,0 +1 @@
+tests/test_files/** linguist-vendored

Dockerfile (new file, +16)
@@ -0,0 +1,16 @@
+FROM python:3.13-alpine
+
+USER root
+
+# Runtime dependency
+RUN apk add --no-cache ffmpeg
+
+RUN pip install markitdown
+
+# Default USERID and GROUPID
+ARG USERID=10000
+ARG GROUPID=10000
+
+USER $USERID:$GROUPID
+
+ENTRYPOINT [ "markitdown" ]
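
Note: the `USERID` and `GROUPID` build args default to 10000, so the image runs unprivileged by default. They can be overridden at build time with Docker's standard flag (illustrative invocation, not part of the patch): `docker build --build-arg USERID=$(id -u) --build-arg GROUPID=$(id -g) -t markitdown:latest .`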

README.md
@@ -60,7 +60,7 @@ cat path-to-file.pdf | markitdown
 ```
 
 
-You can also configure markitdown to use Large Language Models to describe images. To do so you must provide mlm_client and mlm_model parameters to MarkItDown object, according to your specific client.
+You can also configure markitdown to use Large Language Models to describe images. To do so, you must provide the `mlm_client` and `mlm_model` parameters to the MarkItDown object, according to your specific client.
 
 ```python
 from markitdown import MarkItDown
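
For context, a minimal sketch of the configuration that paragraph describes, using the `mlm_client` and `mlm_model` parameters named in the change. The OpenAI client and model name are assumptions for illustration, not mandated by the patch:

```python
from markitdown import MarkItDown
from openai import OpenAI  # assumed client; substitute your own

client = OpenAI()
md = MarkItDown(mlm_client=client, mlm_model="gpt-4o")  # model name is illustrative
result = md.convert("example.jpg")
print(result.text_content)
```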

@@ -72,6 +72,13 @@ result = md.convert("example.jpg")
 print(result.text_content)
 ```
 
+You can also use the project as a Docker image:
+
+```sh
+docker build -t markitdown:latest .
+docker run --rm -i markitdown:latest < ~/your-file.pdf > output.md
+```
+
 ## Contributing
 
 This project welcomes contributions and suggestions. Most contributions require you to agree to a

src/markitdown/_markitdown.py
@@ -15,6 +15,7 @@ import traceback
 import zipfile
 from typing import Any, Dict, List, Optional, Union
 from urllib.parse import parse_qs, quote, unquote, urlparse, urlunparse
+from warnings import catch_warnings
 
 import mammoth
 import markdownify

@@ -31,7 +32,13 @@ from charset_normalizer import from_path
 
 # Optional Transcription support
 try:
-    import pydub
+    # Using warnings' catch_warnings to catch
+    # pydub's warning of ffmpeg or avconv missing
+    with catch_warnings(record=True) as w:
+        import pydub
+
+        if w:
+            raise ModuleNotFoundError
     import speech_recognition as sr
 
     IS_AUDIO_TRANSCRIPTION_CAPABLE = True
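
The hunk above relies on `catch_warnings(record=True)` collecting any warning emitted during the import into the list `w`, so a non-empty `w` means pydub found no usable backend; the `raise ModuleNotFoundError` then falls through to the module's existing optional-import handling. A self-contained illustration of that stdlib behavior (not from the patch):

```python
import warnings

# With record=True, warnings raised inside the block are appended to `w`
# as WarningMessage objects instead of being printed.
with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")  # ensure the warning is not suppressed
    warnings.warn("Couldn't find ffmpeg or avconv")  # stand-in for pydub's warning

assert len(w) == 1
assert "ffmpeg" in str(w[0].message)
```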

@@ -344,8 +351,11 @@ class YouTubeConverter(DocumentConverter):
             assert isinstance(params["v"][0], str)
             video_id = str(params["v"][0])
             try:
+                youtube_transcript_languages = kwargs.get(
+                    "youtube_transcript_languages", ("en",)
+                )
                 # Must be a single transcript.
-                transcript = YouTubeTranscriptApi.get_transcript(video_id)  # type: ignore
+                transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=youtube_transcript_languages)  # type: ignore
                 transcript_text = " ".join([part["text"] for part in transcript])  # type: ignore
                 # Alternative formatting:
                 # formatter = TextFormatter()
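
A hedged usage sketch for the new kwarg (the video URL is a placeholder; language codes are tried in the order given, per youtube-transcript-api's behavior, and the `**kwargs` forwarding added further down makes this work for URL inputs):

```python
from markitdown import MarkItDown

md = MarkItDown()
result = md.convert(
    "https://www.youtube.com/watch?v=VIDEO_ID",  # hypothetical video
    youtube_transcript_languages=("de", "en"),  # prefer German, fall back to English
)
print(result.text_content)
```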

@@ -492,7 +502,9 @@ class DocxConverter(HtmlConverter):
 
         result = None
         with open(local_path, "rb") as docx_file:
-            result = mammoth.convert_to_html(docx_file)
+            style_map = kwargs.get("style_map", None)
+
+            result = mammoth.convert_to_html(docx_file, style_map=style_map)
             html_content = result.value
             result = self._convert(html_content)
 
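
For context, a mammoth style map is a small set of text rules mapping DOCX styles to HTML elements. A sketch with assumed style and file names (only the `comment-reference => ` rule is actually exercised by the tests below):

```python
import mammoth

# Style names are assumptions about the input document, not part of the patch.
style_map = "\n".join(
    [
        "p[style-name='Section Title'] => h1:fresh",  # promote a custom style to <h1>
        "comment-reference => ",  # map in-text comment references to nothing;
        # the tests below use this rule so comment text survives conversion
    ]
)
with open("example.docx", "rb") as docx_file:  # hypothetical file
    result = mammoth.convert_to_html(docx_file, style_map=style_map)
    print(result.value)
```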

@@ -999,6 +1011,7 @@ class MarkItDown:
         requests_session: Optional[requests.Session] = None,
         llm_client: Optional[Any] = None,
         llm_model: Optional[Any] = None,
+        style_map: Optional[str] = None,
     ):
         if requests_session is None:
             self._requests_session = requests.Session()

@@ -1007,6 +1020,7 @@ class MarkItDown:
 
         self._llm_client = llm_client
         self._llm_model = llm_model
+        self._style_map = style_map
 
         self._page_converters: List[DocumentConverter] = []
 

@@ -1149,7 +1163,7 @@ class MarkItDown:
                 self._append_ext(extensions, g)
 
             # Convert
-            result = self._convert(temp_path, extensions, url=response.url)
+            result = self._convert(temp_path, extensions, url=response.url, **kwargs)
         # Clean up
         finally:
             try:

@@ -1185,6 +1199,9 @@ class MarkItDown:
                 # Add the list of converters for nested processing
                 _kwargs["_parent_converters"] = self._page_converters
 
+                if "style_map" not in _kwargs and self._style_map is not None:
+                    _kwargs["style_map"] = self._style_map
+
                 # If we hit an error log it and keep trying
                 try:
                     res = converter.convert(local_path, **_kwargs)
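
The guard above gives per-call values precedence: the instance-level `style_map` is applied only when the caller did not pass one. A short sketch of both ways to set it (file names are hypothetical):

```python
from markitdown import MarkItDown

md = MarkItDown(style_map="comment-reference => ")  # instance-level default
result = md.convert("notes_with_comments.docx")  # uses the instance default
result = md.convert(
    "notes_with_comments.docx",
    style_map="p[style-name='Title'] => h1:fresh",  # per-call value wins
)
```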

tests/test_files/test_with_comment.docx (binary, vendored, executable file; not shown)

tests/test_markitdown.py
@@ -51,6 +51,17 @@ DOCX_TEST_STRINGS = [
     "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
 ]
 
+DOCX_COMMENT_TEST_STRINGS = [
+    "314b0a30-5b04-470b-b9f7-eed2c2bec74a",
+    "49e168b7-d2ae-407f-a055-2167576f39a1",
+    "## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
+    "# Abstract",
+    "# Introduction",
+    "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
+    "This is a test comment. 12df-321a",
+    "Yet another comment in the doc. 55yiyi-asd09",
+]
+
 PPTX_TEST_STRINGS = [
     "2cdda5c8-e50e-4db4-b5f0-9722a649f455",
     "04191ea8-5c73-4215-a1d3-1cfb43aaaf12",

@@ -139,6 +150,24 @@ def test_markitdown_local() -> None:
         text_content = result.text_content.replace("\\", "")
         assert test_string in text_content
 
+    # Test DOCX processing, with comments
+    result = markitdown.convert(
+        os.path.join(TEST_FILES_DIR, "test_with_comment.docx"),
+        style_map="comment-reference => ",
+    )
+    for test_string in DOCX_COMMENT_TEST_STRINGS:
+        text_content = result.text_content.replace("\\", "")
+        assert test_string in text_content
+
+    # Test DOCX processing, with comments and setting style_map on init
+    markitdown_with_style_map = MarkItDown(style_map="comment-reference => ")
+    result = markitdown_with_style_map.convert(
+        os.path.join(TEST_FILES_DIR, "test_with_comment.docx")
+    )
+    for test_string in DOCX_COMMENT_TEST_STRINGS:
+        text_content = result.text_content.replace("\\", "")
+        assert test_string in text_content
+
     # Test PPTX processing
     result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.pptx"))
     for test_string in PPTX_TEST_STRINGS: