Compare commits
102 Commits
SHA1 (author and date columns were empty in the source table):
e58bc486ee, 81ef601c09, 518b12c1fb, 8eaf5a1da9, 38c924793c, b9526d5e47, 519fe172aa, abe9752438,
73ba69d8cd, 2a4f7bb6a8, 7cf5e0bb23, 3090917a49, 7bea2672a0, bf6a15e9b5, bfde857420, f58a864951,
265aea2edf, 05b78e7ce1, 436407288f, 731b39e7f5, 08ed32869e, d248621ba4, 4678c8a2a4, 125e206047,
f94d09990e, cfd2319c14, 73161982ff, 9b69467772, 857a2d160d, 1123392306, 377a7eaa7d, c1a0d3deaf,
5276616ba1, 7e6c36c5d4, 52d73080c7, e6421000e3, 08a25345e3, 8921fe7304, 613825d5b3, 18e3f1d428,
c295dee5e4, dd87dd5e36, 535147b2e8, 5c776bda70, 423a01844a, 7147bef7d5, a5f39d6922, 925c4499f7,
b28f380a47, c86287b7e3, 6f3c762526, cb66b35f11, a2743a5314, 277480066a, 6e1b9a7402, 1384e80725,
356e895306, f6e75c46d4, 8bc1bee18b, f4471d96e2, 088007338d, bb929629f3, 233ba679b8, 46b7f043d3,
5fc70864f2, 39410d01df, 6e4caac70d, 224f1df0fc, 1deaba1c6c, 09cb048cbe, b029ae1cd4, 524aa0da75,
de1b54d79f, 1e7806a7ac, 1163aa2b4e, 3bcf2bdae7, 41a10b9a35, f1e399eee4, 8b02c0bf9f, 1dda535330,
362214323e, 457b6234e6, 790031409b, 9e546a8588, ddf695cf81, 8d5f16ecd2, a571021199, 9add517510,
05a49ca129, 752fbd333c, 7dc2695b96, 53fad6eb31, f398f3d443, e0a30295ff, 07fe457a90, 60c4a62917,
3eb8cf385b, 8c91c11ea8, 14bd8d319a, dbc727615d, afaff11ef0, a3208f2bd0
**.devcontainer/devcontainer.json** (new file, 32 lines)

```diff
@@ -0,0 +1,32 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile
+{
+	"name": "Existing Dockerfile",
+	"build": {
+		// Sets the run context to one level up instead of the .devcontainer folder.
+		"context": "..",
+		// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
+		"dockerfile": "../Dockerfile",
+		"args": {
+			"INSTALL_GIT": "true"
+		}
+	},
+
+	// Features to add to the dev container. More info: https://containers.dev/features.
+	// "features": {},
+	"features": {
+		"ghcr.io/devcontainers-extra/features/hatch:2": {}
+	},
+
+	// Use 'forwardPorts' to make a list of ports inside the container available locally.
+	// "forwardPorts": [],
+
+	// Uncomment the next line to run commands after the container is created.
+	// "postCreateCommand": "cat /etc/os-release",
+
+	// Configure tool-specific properties.
+	// "customizations": {},
+
+	// Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
+	"remoteUser": "root"
+}
```
**.github/dependabot.yml** (new file, 6 lines)

```diff
@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
```
**.github/workflows/pre-commit.yml** (4 changes)

```diff
@@ -5,9 +5,9 @@ jobs:
   pre-commit:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v5
         with:
           python-version: "3.x"
```
**.github/workflows/tests.yml** (6 changes)

```diff
@@ -5,8 +5,8 @@ jobs:
   tests:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
        with:
          python-version: |
            3.10
@@ -14,7 +14,7 @@ jobs:
            3.12
      - name: Set up pip cache
        if: runner.os == 'Linux'
-        uses: actions/cache@v3
+        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml') }}
```
**.gitignore** (4 changes)

```diff
@@ -1,3 +1,5 @@
+.vscode
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -160,3 +162,5 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+src/.DS_Store
+.DS_Store
```
**Dockerfile** (11 changes)

```diff
@@ -1,9 +1,16 @@
-FROM python:3.13-alpine
+FROM python:3.13-slim-bullseye
+
+USER root
+
+ARG INSTALL_GIT=false
+RUN if [ "$INSTALL_GIT" = "true" ]; then \
+    apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*; \
+    fi
 
 # Runtime dependency
-RUN apk add --no-cache ffmpeg
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    ffmpeg \
+    && rm -rf /var/lib/apt/lists/*
 
 RUN pip install markitdown
```
**README.md** (142 changes)

````diff
@@ -1,66 +1,75 @@
 # MarkItDown
 
 [](https://pypi.org/project/markitdown/)
+
+[](https://github.com/microsoft/autogen)
 
-The MarkItDown library is a utility tool for converting various files to Markdown (e.g., for indexing, text analysis, etc.)
-
-It presently supports:
-
-- PDF
-- PowerPoint
-- Word
-- Excel
-- Images (EXIF metadata and OCR)
-- Audio (EXIF metadata and speech transcription)
-- HTML
-- Text-based formats (CSV, JSON, XML)
-- ZIP files (iterates over contents)
+MarkItDown is a utility for converting various files to Markdown (e.g., for indexing, text analysis, etc).
+It supports:
+
+- PDF (.pdf)
+- PowerPoint (.pptx)
+- Word (.docx)
+- Excel (.xlsx)
+- Images (EXIF metadata, and OCR)
+- Audio (EXIF metadata, and speech transcription)
+- HTML (special handling of Wikipedia, etc.)
+- Various other text-based formats (csv, json, xml, etc.)
+- ZIP (Iterates over contents and converts each file)
 
-# Installation
-
-You can install `markitdown` using pip:
-
-```python
-pip install markitdown
-```
-
-or from the source
-
-```sh
-pip install -e .
-```
-
-# Usage
-The API is simple:
-
-```python
-from markitdown import MarkItDown
-
-markitdown = MarkItDown()
-result = markitdown.convert("test.xlsx")
-print(result.text_content)
-```
-
-To use this as a command-line utility, install it and then run it like this:
-
-```bash
-markitdown path-to-file.pdf
-```
-
-This will output Markdown to standard output. You can save it like this:
+To install MarkItDown, use pip: `pip install markitdown`. Alternatively, you can install it from the source: `pip install -e .`
+
+## Usage
+### Command-Line
 
 ```bash
 markitdown path-to-file.pdf > document.md
 ```
 
-You can pipe content to standard input by omitting the argument:
+Or use `-o` to specify the output file:
+
+```bash
+markitdown path-to-file.pdf -o document.md
+```
+
+To use Document Intelligence conversion:
+
+```bash
+markitdown path-to-file.pdf -o document.md -d -e "<document_intelligence_endpoint>"
+```
+
+You can also pipe content:
 
 ```bash
 cat path-to-file.pdf | markitdown
 ```
 
-You can also configure markitdown to use Large Language Models to describe images. To do so you must provide `llm_client` and `llm_model` parameters to MarkItDown object, according to your specific client.
+More information about how to set up an Azure Document Intelligence Resource can be found [here](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/create-document-intelligence-resource?view=doc-intel-4.0.0)
+
+### Python API
+
+Basic usage in Python:
+
+```python
+from markitdown import MarkItDown
+
+md = MarkItDown()
+result = md.convert("test.xlsx")
+print(result.text_content)
+```
+
+Document Intelligence conversion in Python:
+
+```python
+from markitdown import MarkItDown
+
+md = MarkItDown(docintel_endpoint="<document_intelligence_endpoint>")
+result = md.convert("test.pdf")
+print(result.text_content)
+```
+
+To use Large Language Models for image descriptions, provide `llm_client` and `llm_model`:
 
 ```python
 from markitdown import MarkItDown
@@ -72,13 +81,13 @@ result = md.convert("example.jpg")
 print(result.text_content)
 ```
 
-You can also use the project as Docker Image:
+### Docker
 
 ```sh
 docker build -t markitdown:latest .
 docker run --rm -i markitdown:latest < ~/your-file.pdf > output.md
 ```
 
 
 ## Contributing
 
 This project welcomes contributions and suggestions. Most contributions require you to agree to a
@@ -93,28 +102,41 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
 For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
 contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
 
-### Running Tests
+### How to Contribute
 
-To run tests, install `hatch` using `pip` or other methods as described [here](https://hatch.pypa.io/dev/install).
+You can help by looking at issues or helping review PRs. Any issue or PR is welcome, but we have also marked some as 'open for contribution' and 'open for reviewing' to help facilitate community contributions. These are ofcourse just suggestions and you are welcome to contribute in any way you like.
 
-```sh
-pip install hatch
-hatch shell
-hatch test
-```
+<div align="center">
 
-### Running Pre-commit Checks
+| | All | Especially Needs Help from Community |
+|-----------------------|------------------------------------------|------------------------------------------------------------------------------------------|
+| **Issues** | [All Issues](https://github.com/microsoft/markitdown/issues) | [Issues open for contribution](https://github.com/microsoft/markitdown/issues?q=is%3Aissue+is%3Aopen+label%3A%22open+for+contribution%22) |
+| **PRs** | [All PRs](https://github.com/microsoft/markitdown/pulls) | [PRs open for reviewing](https://github.com/microsoft/markitdown/pulls?q=is%3Apr+is%3Aopen+label%3A%22open+for+reviewing%22) |
 
-Please run the pre-commit checks before submitting a PR.
+</div>
 
-```sh
-pre-commit run --all-files
-```
+### Running Tests and Checks
+
+- Install `hatch` in your environment and run tests:
+  ```sh
+  pip install hatch  # Other ways of installing hatch: https://hatch.pypa.io/dev/install/
+  hatch shell
+  hatch test
+  ```
+
+  (Alternative) Use the Devcontainer which has all the dependencies installed:
+  ```sh
+  # Reopen the project in Devcontainer and run:
+  hatch test
+  ```
+
+- Run pre-commit checks before submitting a PR: `pre-commit run --all-files`
 
 ## Trademarks
 
 This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
 trademarks or logos is subject to and must follow
 [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
 Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
 Any use of third-party trademarks or logos are subject to those third-party's policies.
````
**pyproject.toml**

```diff
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
 [project]
 name = "markitdown"
 dynamic = ["version"]
-description = ''
+description = 'Utility tool for converting various files to Markdown'
 readme = "README.md"
 requires-python = ">=3.10"
 license = "MIT"
@@ -32,14 +32,18 @@ dependencies = [
     "python-pptx",
     "pandas",
     "openpyxl",
+    "xlrd",
     "pdfminer.six",
     "puremagic",
     "pydub",
+    "olefile",
     "youtube-transcript-api",
     "SpeechRecognition",
     "pathvalidate",
     "charset-normalizer",
     "openai",
+    "azure-ai-documentintelligence",
+    "azure-identity"
 ]
 
 [project.urls]
```
**src/markitdown/__about__.py**

```diff
@@ -1,4 +1,4 @@
 # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
 #
 # SPDX-License-Identifier: MIT
-__version__ = "0.0.1a3"
+__version__ = "0.0.2"
```
**src/markitdown/__main__.py**

```diff
@@ -1,45 +1,104 @@
 # SPDX-FileCopyrightText: 2024-present Adam Fourney <adamfo@microsoft.com>
 #
 # SPDX-License-Identifier: MIT
-import sys
 import argparse
-from ._markitdown import MarkItDown
+import sys
+from textwrap import dedent
+from .__about__ import __version__
+from ._markitdown import MarkItDown, DocumentConverterResult
 
 
 def main():
     parser = argparse.ArgumentParser(
         description="Convert various file formats to markdown.",
         prog="markitdown",
         formatter_class=argparse.RawDescriptionHelpFormatter,
-        usage="""
-SYNTAX:
-
-    markitdown <OPTIONAL: FILENAME>
-    If FILENAME is empty, markitdown reads from stdin.
-
-EXAMPLE:
-
-    markitdown example.pdf
-
-    OR
-
-    cat example.pdf | markitdown
-
-    OR
-
-    markitdown < example.pdf
-""".strip(),
+        usage=dedent(
+            """
+            SYNTAX:
+
+                markitdown <OPTIONAL: FILENAME>
+                If FILENAME is empty, markitdown reads from stdin.
+
+            EXAMPLE:
+
+                markitdown example.pdf
+
+                OR
+
+                cat example.pdf | markitdown
+
+                OR
+
+                markitdown < example.pdf
+
+                OR to save to a file use
+
+                markitdown example.pdf -o example.md
+
+                OR
+
+                markitdown example.pdf > example.md
+            """
+        ).strip(),
     )
 
+    parser.add_argument(
+        "-v",
+        "--version",
+        action="version",
+        version=f"%(prog)s {__version__}",
+        help="show the version number and exit",
+    )
+
+    parser.add_argument(
+        "-o",
+        "--output",
+        help="Output file name. If not provided, output is written to stdout.",
+    )
+
+    parser.add_argument(
+        "-d",
+        "--use-docintel",
+        action="store_true",
+        help="Use Document Intelligence to extract text instead of offline conversion. Requires a valid Document Intelligence Endpoint.",
+    )
+
+    parser.add_argument(
+        "-e",
+        "--endpoint",
+        type=str,
+        help="Document Intelligence Endpoint. Required if using Document Intelligence.",
+    )
+
     parser.add_argument("filename", nargs="?")
     args = parser.parse_args()
 
-    if args.filename is None:
-        markitdown = MarkItDown()
-        result = markitdown.convert_stream(sys.stdin.buffer)
-        print(result.text_content)
+    if args.use_docintel:
+        if args.endpoint is None:
+            raise ValueError(
+                "Document Intelligence Endpoint is required when using Document Intelligence."
+            )
+        elif args.filename is None:
+            raise ValueError("Filename is required when using Document Intelligence.")
+        markitdown = MarkItDown(docintel_endpoint=args.endpoint)
     else:
-        markitdown = MarkItDown()
-        result = markitdown.convert(args.filename)
-        print(result.text_content)
+        markitdown = MarkItDown()
+
+    if args.filename is None:
+        result = markitdown.convert_stream(sys.stdin.buffer)
+    else:
+        result = markitdown.convert(args.filename)
+
+    _handle_output(args, result)
+
+
+def _handle_output(args, result: DocumentConverterResult):
+    """Handle output to stdout or file"""
+    if args.output:
+        with open(args.output, "w", encoding="utf-8") as f:
+            f.write(result.text_content)
+    else:
+        print(result.text_content)
```
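A usage note on the new `-o` flag: the sketch below is a plausible Python-API equivalent of `markitdown example.pdf -o example.md`, assembled from the README and `_handle_output` above (the file names are illustrative):

```python
from markitdown import MarkItDown

# Python equivalent of: markitdown example.pdf -o example.md
md = MarkItDown()
result = md.convert("example.pdf")
with open("example.md", "w", encoding="utf-8") as f:
    f.write(result.text_content)  # same write path _handle_output uses
```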
**src/markitdown/_markitdown.py**

```diff
@@ -13,12 +13,15 @@ import sys
 import tempfile
 import traceback
 import zipfile
+from xml.dom import minidom
 from typing import Any, Dict, List, Optional, Union
+from pathlib import Path
 from urllib.parse import parse_qs, quote, unquote, urlparse, urlunparse
-from warnings import warn, resetwarnings, catch_warnings
+from warnings import warn, filterwarnings
 
 import mammoth
 import markdownify
+import olefile
 import pandas as pd
 import pdfminer
 import pdfminer.high_level
```
```diff
@@ -30,22 +33,32 @@ import requests
 from bs4 import BeautifulSoup
 from charset_normalizer import from_path
 
-# Optional Transcription support
-try:
-    # Using warnings' catch_warnings to catch
-    # pydub's warning of ffmpeg or avconv missing
-    with catch_warnings(record=True) as w:
-        import pydub
+# Azure imports
+from azure.ai.documentintelligence import DocumentIntelligenceClient
+from azure.ai.documentintelligence.models import (
+    AnalyzeDocumentRequest,
+    AnalyzeResult,
+    DocumentAnalysisFeature,
+)
+from azure.identity import DefaultAzureCredential
 
-        if w:
-            raise ModuleNotFoundError
+# TODO: currently, there is a bug in the document intelligence SDK with importing the "ContentFormat" enum.
+# This constant is a temporary fix until the bug is resolved.
+CONTENT_FORMAT = "markdown"
+
+# Override mimetype for csv to fix issue on windows
+mimetypes.add_type("text/csv", ".csv")
+
+# Optional Transcription support
+IS_AUDIO_TRANSCRIPTION_CAPABLE = False
+filterwarnings("ignore", message=r".*Couldn\'t find ffmpeg or avconv.*", module="pydub")
+try:
+    import pydub
     import speech_recognition as sr
 
     IS_AUDIO_TRANSCRIPTION_CAPABLE = True
 except ModuleNotFoundError:
     pass
-finally:
-    resetwarnings()
 
 # Optional YouTube transcription support
 try:
```
```diff
@@ -71,7 +84,14 @@ class _CustomMarkdownify(markdownify.MarkdownConverter):
         # Explicitly cast options to the expected type if necessary
         super().__init__(**options)
 
-    def convert_hn(self, n: int, el: Any, text: str, convert_as_inline: bool) -> str:
+    def convert_hn(
+        self,
+        n: int,
+        el: Any,
+        text: str,
+        convert_as_inline: Optional[bool] = False,
+        **kwargs,
+    ) -> str:
         """Same as usual, but be sure to start with a new line"""
         if not convert_as_inline:
             if not re.search(r"^\n", text):
```
```diff
@@ -79,7 +99,9 @@ class _CustomMarkdownify(markdownify.MarkdownConverter):
 
         return super().convert_hn(n, el, text, convert_as_inline)  # type: ignore
 
-    def convert_a(self, el: Any, text: str, convert_as_inline: bool):
+    def convert_a(
+        self, el: Any, text: str, convert_as_inline: Optional[bool] = False, **kwargs
+    ):
         """Same as usual converter, but removes Javascript links and escapes URIs."""
         prefix, suffix, text = markdownify.chomp(text)  # type: ignore
         if not text:
```
```diff
@@ -115,7 +137,9 @@ class _CustomMarkdownify(markdownify.MarkdownConverter):
             else text
         )
 
-    def convert_img(self, el: Any, text: str, convert_as_inline: bool) -> str:
+    def convert_img(
+        self, el: Any, text: str, convert_as_inline: Optional[bool] = False, **kwargs
+    ) -> str:
         """Same as usual converter, but removes data URIs"""
 
         alt = el.attrs.get("alt", None) or ""
```
```diff
@@ -169,7 +193,10 @@ class PlainTextConverter(DocumentConverter):
         # Only accept text files
         if content_type is None:
             return None
-        elif "text/" not in content_type.lower():
+        elif all(
+            not content_type.lower().startswith(type_prefix)
+            for type_prefix in ["text/", "application/json"]
+        ):
             return None
 
         text_content = str(from_path(local_path).best())
```
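A minimal sketch of what this change enables (the filename is illustrative; it mirrors the `test.json` fixture added later in this compare):

```python
from markitdown import MarkItDown

md = MarkItDown()
# JSON files now pass the content-type gate ("application/json")
# and are converted as plain text.
result = md.convert("test.json")
print(result.text_content)
```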
```diff
@@ -197,7 +224,7 @@ class HtmlConverter(DocumentConverter):
         return result
 
     def _convert(self, html_content: str) -> Union[None, DocumentConverterResult]:
-        """Helper function that converts and HTML string."""
+        """Helper function that converts an HTML string."""
 
         # Parse the string
         soup = BeautifulSoup(html_content, "html.parser")
```
```diff
@@ -216,12 +243,152 @@ class HtmlConverter(DocumentConverter):
 
         assert isinstance(webpage_text, str)
 
+        # remove leading and trailing \n
+        webpage_text = webpage_text.strip()
+
         return DocumentConverterResult(
             title=None if soup.title is None else soup.title.string,
             text_content=webpage_text,
         )
 
 
+class RSSConverter(DocumentConverter):
+    """Convert RSS / Atom type to markdown"""
+
+    def convert(
+        self, local_path: str, **kwargs
+    ) -> Union[None, DocumentConverterResult]:
+        # Bail if not RSS type
+        extension = kwargs.get("file_extension", "")
+        if extension.lower() not in [".xml", ".rss", ".atom"]:
+            return None
+        try:
+            doc = minidom.parse(local_path)
+        except BaseException as _:
+            return None
+        result = None
+        if doc.getElementsByTagName("rss"):
+            # A RSS feed must have a root element of <rss>
+            result = self._parse_rss_type(doc)
+        elif doc.getElementsByTagName("feed"):
+            root = doc.getElementsByTagName("feed")[0]
+            if root.getElementsByTagName("entry"):
+                # An Atom feed must have a root element of <feed> and at least one <entry>
+                result = self._parse_atom_type(doc)
+            else:
+                return None
+        else:
+            # not rss or atom
+            return None
+
+        return result
+
+    def _parse_atom_type(
+        self, doc: minidom.Document
+    ) -> Union[None, DocumentConverterResult]:
+        """Parse the type of an Atom feed.
+
+        Returns None if the feed type is not recognized or something goes wrong.
+        """
+        try:
+            root = doc.getElementsByTagName("feed")[0]
+            title = self._get_data_by_tag_name(root, "title")
+            subtitle = self._get_data_by_tag_name(root, "subtitle")
+            entries = root.getElementsByTagName("entry")
+            md_text = f"# {title}\n"
+            if subtitle:
+                md_text += f"{subtitle}\n"
+            for entry in entries:
+                entry_title = self._get_data_by_tag_name(entry, "title")
+                entry_summary = self._get_data_by_tag_name(entry, "summary")
+                entry_updated = self._get_data_by_tag_name(entry, "updated")
+                entry_content = self._get_data_by_tag_name(entry, "content")
+
+                if entry_title:
+                    md_text += f"\n## {entry_title}\n"
+                if entry_updated:
+                    md_text += f"Updated on: {entry_updated}\n"
+                if entry_summary:
+                    md_text += self._parse_content(entry_summary)
+                if entry_content:
+                    md_text += self._parse_content(entry_content)
+
+            return DocumentConverterResult(
+                title=title,
+                text_content=md_text,
+            )
+        except BaseException as _:
+            return None
+
+    def _parse_rss_type(
+        self, doc: minidom.Document
+    ) -> Union[None, DocumentConverterResult]:
+        """Parse the type of an RSS feed.
+
+        Returns None if the feed type is not recognized or something goes wrong.
+        """
+        try:
+            root = doc.getElementsByTagName("rss")[0]
+            channel = root.getElementsByTagName("channel")
+            if not channel:
+                return None
+            channel = channel[0]
+            channel_title = self._get_data_by_tag_name(channel, "title")
+            channel_description = self._get_data_by_tag_name(channel, "description")
+            items = channel.getElementsByTagName("item")
+            if channel_title:
+                md_text = f"# {channel_title}\n"
+            if channel_description:
+                md_text += f"{channel_description}\n"
+            if not items:
+                items = []
+            for item in items:
+                title = self._get_data_by_tag_name(item, "title")
+                description = self._get_data_by_tag_name(item, "description")
+                pubDate = self._get_data_by_tag_name(item, "pubDate")
+                content = self._get_data_by_tag_name(item, "content:encoded")
+
+                if title:
+                    md_text += f"\n## {title}\n"
+                if pubDate:
+                    md_text += f"Published on: {pubDate}\n"
+                if description:
+                    md_text += self._parse_content(description)
+                if content:
+                    md_text += self._parse_content(content)
+
+            return DocumentConverterResult(
+                title=channel_title,
+                text_content=md_text,
+            )
+        except BaseException as _:
+            print(traceback.format_exc())
+            return None
+
+    def _parse_content(self, content: str) -> str:
+        """Parse the content of an RSS feed item"""
+        try:
+            # using bs4 because many RSS feeds have HTML-styled content
+            soup = BeautifulSoup(content, "html.parser")
+            return _CustomMarkdownify().convert_soup(soup)
+        except BaseException as _:
+            return content
+
+    def _get_data_by_tag_name(
+        self, element: minidom.Element, tag_name: str
+    ) -> Union[str, None]:
+        """Get data from first child element with the given tag name.
+        Returns None when no such element is found.
+        """
+        nodes = element.getElementsByTagName(tag_name)
+        if not nodes:
+            return None
+        fc = nodes[0].firstChild
+        if fc:
+            return fc.data
+        return None
+
+
 class WikipediaConverter(DocumentConverter):
     """Handle Wikipedia pages separately, focusing only on the main document content."""
```
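A usage sketch for the new RSS/Atom support, assuming a local feed file (it mirrors the `test_rss.xml` test added below; the filename is illustrative):

```python
from markitdown import MarkItDown

md = MarkItDown()
# .xml, .rss, and .atom extensions are routed to RSSConverter;
# feed items come back as "## <item title>" sections.
result = md.convert("test_rss.xml")
print(result.title)         # channel/feed title
print(result.text_content)  # markdown body
```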
```diff
@@ -403,6 +570,67 @@ class YouTubeConverter(DocumentConverter):
         return None
 
 
+class IpynbConverter(DocumentConverter):
+    """Converts Jupyter Notebook (.ipynb) files to Markdown."""
+
+    def convert(
+        self, local_path: str, **kwargs: Any
+    ) -> Union[None, DocumentConverterResult]:
+        # Bail if not ipynb
+        extension = kwargs.get("file_extension", "")
+        if extension.lower() != ".ipynb":
+            return None
+
+        # Parse and convert the notebook
+        result = None
+        with open(local_path, "rt", encoding="utf-8") as fh:
+            notebook_content = json.load(fh)
+            result = self._convert(notebook_content)
+
+        return result
+
+    def _convert(self, notebook_content: dict) -> Union[None, DocumentConverterResult]:
+        """Helper function that converts notebook JSON content to Markdown."""
+        try:
+            md_output = []
+            title = None
+
+            for cell in notebook_content.get("cells", []):
+                cell_type = cell.get("cell_type", "")
+                source_lines = cell.get("source", [])
+
+                if cell_type == "markdown":
+                    md_output.append("".join(source_lines))
+
+                    # Extract the first # heading as title if not already found
+                    if title is None:
+                        for line in source_lines:
+                            if line.startswith("# "):
+                                title = line.lstrip("# ").strip()
+                                break
+
+                elif cell_type == "code":
+                    # Code cells are wrapped in Markdown code blocks
+                    md_output.append(f"```python\n{''.join(source_lines)}\n```")
+                elif cell_type == "raw":
+                    md_output.append(f"```\n{''.join(source_lines)}\n```")
+
+            md_text = "\n\n".join(md_output)
+
+            # Check for title in notebook metadata
+            title = notebook_content.get("metadata", {}).get("title", title)
+
+            return DocumentConverterResult(
+                title=title,
+                text_content=md_text,
+            )
+
+        except Exception as e:
+            raise FileConversionException(
+                f"Error converting .ipynb file: {str(e)}"
+            ) from e
+
+
 class BingSerpConverter(DocumentConverter):
     """
     Handle Bing results pages (only the organic search results).
```
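A usage sketch for the new notebook support (filename illustrative; it mirrors the `test_notebook.ipynb` fixture added below):

```python
from markitdown import MarkItDown

md = MarkItDown()
# Markdown cells pass through as-is; code cells are fenced as python blocks.
result = md.convert("test_notebook.ipynb")
print(result.title)  # metadata "title" if present, else the first "# " heading
print(result.text_content)
```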
```diff
@@ -524,7 +752,31 @@ class XlsxConverter(HtmlConverter):
         if extension.lower() != ".xlsx":
             return None
 
-        sheets = pd.read_excel(local_path, sheet_name=None)
+        sheets = pd.read_excel(local_path, sheet_name=None, engine="openpyxl")
         md_content = ""
         for s in sheets:
             md_content += f"## {s}\n"
+            html_content = sheets[s].to_html(index=False)
+            md_content += self._convert(html_content).text_content.strip() + "\n\n"
+
+        return DocumentConverterResult(
+            title=None,
+            text_content=md_content.strip(),
+        )
+
+
+class XlsConverter(HtmlConverter):
+    """
+    Converts XLS files to Markdown, with each sheet presented as a separate Markdown table.
+    """
+
+    def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
+        # Bail if not a XLS
+        extension = kwargs.get("file_extension", "")
+        if extension.lower() != ".xls":
+            return None
+
+        sheets = pd.read_excel(local_path, sheet_name=None, engine="xlrd")
+        md_content = ""
+        for s in sheets:
+            md_content += f"## {s}\n"
```
```diff
@@ -542,6 +794,35 @@ class PptxConverter(HtmlConverter):
     Converts PPTX files to Markdown. Supports heading, tables and images with alt text.
     """
 
+    def _get_llm_description(
+        self, llm_client, llm_model, image_blob, content_type, prompt=None
+    ):
+        if prompt is None or prompt.strip() == "":
+            prompt = "Write a detailed alt text for this image with less than 50 words."
+
+        image_base64 = base64.b64encode(image_blob).decode("utf-8")
+        data_uri = f"data:{content_type};base64,{image_base64}"
+
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": data_uri,
+                        },
+                    },
+                    {"type": "text", "text": prompt},
+                ],
+            }
+        ]
+
+        response = llm_client.chat.completions.create(
+            model=llm_model, messages=messages
+        )
+        return response.choices[0].message.content
+
     def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
         # Bail if not a PPTX
         extension = kwargs.get("file_extension", "")
```
```diff
@@ -562,17 +843,38 @@ class PptxConverter(HtmlConverter):
             # Pictures
             if self._is_picture(shape):
                 # https://github.com/scanny/python-pptx/pull/512#issuecomment-1713100069
-                alt_text = ""
-                try:
-                    alt_text = shape._element._nvXxPr.cNvPr.attrib.get("descr", "")
-                except Exception:
-                    pass
+
+                llm_description = None
+                alt_text = None
+
+                llm_client = kwargs.get("llm_client")
+                llm_model = kwargs.get("llm_model")
+                if llm_client is not None and llm_model is not None:
+                    try:
+                        llm_description = self._get_llm_description(
+                            llm_client,
+                            llm_model,
+                            shape.image.blob,
+                            shape.image.content_type,
+                        )
+                    except Exception:
+                        # Unable to describe with LLM
+                        pass
+
+                if not llm_description:
+                    try:
+                        alt_text = shape._element._nvXxPr.cNvPr.attrib.get(
+                            "descr", ""
+                        )
+                    except Exception:
+                        # Unable to get alt text
+                        pass
 
                 # A placeholder name
                 filename = re.sub(r"\W", "", shape.name) + ".jpg"
                 md_content += (
                     "\n\n"
```
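A sketch of wiring this up, assuming an OpenAI-style client as documented in the README (the model name is an example):

```python
from openai import OpenAI
from markitdown import MarkItDown

client = OpenAI()
md = MarkItDown(llm_client=client, llm_model="gpt-4o")
# Slide pictures are captioned by the LLM; embedded alt text is now only a
# fallback when the LLM description fails or comes back empty.
result = md.convert("example.pptx")
print(result.text_content)
```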
```diff
@@ -663,14 +965,13 @@ class MediaConverter(DocumentConverter):
     Abstract class for multi-modal media (e.g., images and audio)
     """
 
-    def _get_metadata(self, local_path):
-        exiftool = shutil.which("exiftool")
-        if not exiftool:
+    def _get_metadata(self, local_path, exiftool_path=None):
+        if not exiftool_path:
             return None
         else:
             try:
                 result = subprocess.run(
-                    [exiftool, "-json", local_path], capture_output=True, text=True
+                    [exiftool_path, "-json", local_path], capture_output=True, text=True
                 ).stdout
                 return json.loads(result)[0]
             except Exception:
```
```diff
@@ -683,7 +984,7 @@ class WavConverter(MediaConverter):
     """
 
     def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
-        # Bail if not a XLSX
+        # Bail if not a WAV
         extension = kwargs.get("file_extension", "")
         if extension.lower() != ".wav":
             return None
```
```diff
@@ -691,7 +992,7 @@ class WavConverter(MediaConverter):
         md_content = ""
 
         # Add metadata
-        metadata = self._get_metadata(local_path)
+        metadata = self._get_metadata(local_path, kwargs.get("exiftool_path"))
         if metadata:
             for f in [
                 "Title",
```
```diff
@@ -746,7 +1047,7 @@ class Mp3Converter(WavConverter):
         md_content = ""
 
         # Add metadata
-        metadata = self._get_metadata(local_path)
+        metadata = self._get_metadata(local_path, kwargs.get("exiftool_path"))
         if metadata:
             for f in [
                 "Title",
```
```diff
@@ -768,6 +1069,14 @@ class Mp3Converter(WavConverter):
         handle, temp_path = tempfile.mkstemp(suffix=".wav")
         os.close(handle)
         try:
+            # Check if pydub defaulted to ffmpeg
+            if pydub.AudioSegment.converter == "ffmpeg" and not shutil.which(
+                "ffmpeg"
+            ):
+                warn(
+                    "pydub: Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work",
+                    RuntimeWarning,
+                )
             sound = pydub.AudioSegment.from_mp3(local_path)
             sound.export(temp_path, format="wav")
```
```diff
@@ -799,7 +1108,7 @@ class ImageConverter(MediaConverter):
     """
 
     def convert(self, local_path, **kwargs) -> Union[None, DocumentConverterResult]:
-        # Bail if not a XLSX
+        # Bail if not an image
         extension = kwargs.get("file_extension", "")
         if extension.lower() not in [".jpg", ".jpeg", ".png"]:
             return None
```
```diff
@@ -807,7 +1116,7 @@ class ImageConverter(MediaConverter):
         md_content = ""
 
         # Add metadata
-        metadata = self._get_metadata(local_path)
+        metadata = self._get_metadata(local_path, kwargs.get("exiftool_path"))
         if metadata:
             for f in [
                 "ImageSize",
```
```diff
@@ -876,6 +1185,79 @@ class ImageConverter(MediaConverter):
         return response.choices[0].message.content
 
 
+class OutlookMsgConverter(DocumentConverter):
+    """Converts Outlook .msg files to markdown by extracting email metadata and content.
+
+    Uses the olefile package to parse the .msg file structure and extract:
+    - Email headers (From, To, Subject)
+    - Email body content
+    """
+
+    def convert(
+        self, local_path: str, **kwargs: Any
+    ) -> Union[None, DocumentConverterResult]:
+        # Bail if not a MSG file
+        extension = kwargs.get("file_extension", "")
+        if extension.lower() != ".msg":
+            return None
+
+        try:
+            msg = olefile.OleFileIO(local_path)
+            # Extract email metadata
+            md_content = "# Email Message\n\n"
+
+            # Get headers
+            headers = {
+                "From": self._get_stream_data(msg, "__substg1.0_0C1F001F"),
+                "To": self._get_stream_data(msg, "__substg1.0_0E04001F"),
+                "Subject": self._get_stream_data(msg, "__substg1.0_0037001F"),
+            }
+
+            # Add headers to markdown
+            for key, value in headers.items():
+                if value:
+                    md_content += f"**{key}:** {value}\n"
+
+            md_content += "\n## Content\n\n"
+
+            # Get email body
+            body = self._get_stream_data(msg, "__substg1.0_1000001F")
+            if body:
+                md_content += body
+
+            msg.close()
+
+            return DocumentConverterResult(
+                title=headers.get("Subject"), text_content=md_content.strip()
+            )
+
+        except Exception as e:
+            raise FileConversionException(
+                f"Could not convert MSG file '{local_path}': {str(e)}"
+            )
+
+    def _get_stream_data(
+        self, msg: olefile.OleFileIO, stream_path: str
+    ) -> Union[str, None]:
+        """Helper to safely extract and decode stream data from the MSG file."""
+        try:
+            if msg.exists(stream_path):
+                data = msg.openstream(stream_path).read()
+                # Try UTF-16 first (common for .msg files)
+                try:
+                    return data.decode("utf-16-le").strip()
+                except UnicodeDecodeError:
+                    # Fall back to UTF-8
+                    try:
+                        return data.decode("utf-8").strip()
+                    except UnicodeDecodeError:
+                        # Last resort - ignore errors
+                        return data.decode("utf-8", errors="ignore").strip()
+        except Exception:
+            pass
+        return None
+
+
 class ZipConverter(DocumentConverter):
     """Converts ZIP files to markdown by extracting and converting all contained files.
```
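A usage sketch for the new `.msg` support (it mirrors the `test_outlook_msg.msg` test added below):

```python
from markitdown import MarkItDown

md = MarkItDown()
result = md.convert("test_outlook_msg.msg")
# Expected shape: "# Email Message", "**From:** ...", "**To:** ...",
# "**Subject:** ...", then a "## Content" section with the body.
print(result.text_content)
```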
```diff
@@ -934,27 +1316,33 @@ class ZipConverter(DocumentConverter):
         extracted_zip_folder_name = (
             f"extracted_{os.path.basename(local_path).replace('.zip', '_zip')}"
         )
-        new_folder = os.path.normpath(
+        extraction_dir = os.path.normpath(
             os.path.join(os.path.dirname(local_path), extracted_zip_folder_name)
         )
         md_content = f"Content from the zip file `{os.path.basename(local_path)}`:\n\n"
 
-        # Safety check for path traversal
-        if not new_folder.startswith(os.path.dirname(local_path)):
-            return DocumentConverterResult(
-                title=None, text_content=f"[ERROR] Invalid zip file path: {local_path}"
-            )
-
         try:
-            # Extract the zip file
+            # Extract the zip file safely
             with zipfile.ZipFile(local_path, "r") as zipObj:
-                zipObj.extractall(path=new_folder)
+                # Safeguard against path traversal
+                for member in zipObj.namelist():
+                    member_path = os.path.normpath(os.path.join(extraction_dir, member))
+                    if (
+                        not os.path.commonprefix([extraction_dir, member_path])
+                        == extraction_dir
+                    ):
+                        raise ValueError(
+                            f"Path traversal detected in zip file: {member}"
+                        )
+
+                # Extract all files safely
+                zipObj.extractall(path=extraction_dir)
 
             # Process each extracted file
-            for root, dirs, files in os.walk(new_folder):
+            for root, dirs, files in os.walk(extraction_dir):
                 for name in files:
                     file_path = os.path.join(root, name)
-                    relative_path = os.path.relpath(file_path, new_folder)
+                    relative_path = os.path.relpath(file_path, extraction_dir)
 
                     # Get file extension
                     _, file_extension = os.path.splitext(name)
```
```diff
@@ -978,7 +1366,7 @@ class ZipConverter(DocumentConverter):
 
         # Clean up extracted files if specified
         if kwargs.get("cleanup_extracted", True):
-            shutil.rmtree(new_folder)
+            shutil.rmtree(extraction_dir)
 
         return DocumentConverterResult(title=None, text_content=md_content.strip())
```
```diff
@@ -987,6 +1375,11 @@ class ZipConverter(DocumentConverter):
                 title=None,
                 text_content=f"[ERROR] Invalid or corrupted zip file: {local_path}",
             )
+        except ValueError as ve:
+            return DocumentConverterResult(
+                title=None,
+                text_content=f"[ERROR] Security error in zip file {local_path}: {str(ve)}",
+            )
         except Exception as e:
             return DocumentConverterResult(
                 title=None,
```
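With the safeguard above, a zip entry named like `../../evil.txt` now surfaces as an error result instead of being written outside the extraction directory. A sketch under that assumption (the archive name is illustrative):

```python
from markitdown import MarkItDown

md = MarkItDown()
result = md.convert("test_files.zip", cleanup_extracted=True)
# A malicious member would yield something like:
#   [ERROR] Security error in zip file ...: Path traversal detected in zip file: ...
print(result.text_content)
```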
```diff
@@ -994,6 +1387,74 @@ class ZipConverter(DocumentConverter):
             )
 
 
+class DocumentIntelligenceConverter(DocumentConverter):
+    """Specialized DocumentConverter that uses Document Intelligence to extract text from documents."""
+
+    def __init__(
+        self,
+        endpoint: str,
+        api_version: str = "2024-07-31-preview",
+    ):
+        self.endpoint = endpoint
+        self.api_version = api_version
+        self.doc_intel_client = DocumentIntelligenceClient(
+            endpoint=self.endpoint,
+            api_version=self.api_version,
+            credential=DefaultAzureCredential(),
+        )
+
+    def convert(
+        self, local_path: str, **kwargs: Any
+    ) -> Union[None, DocumentConverterResult]:
+        # Bail if extension is not supported by Document Intelligence
+        extension = kwargs.get("file_extension", "")
+        docintel_extensions = [
+            ".pdf",
+            ".docx",
+            ".xlsx",
+            ".pptx",
+            ".html",
+            ".jpeg",
+            ".jpg",
+            ".png",
+            ".bmp",
+            ".tiff",
+            ".heif",
+        ]
+        if extension.lower() not in docintel_extensions:
+            return None
+
+        # Get the bytestring for the local path
+        with open(local_path, "rb") as f:
+            file_bytes = f.read()
+
+        # Certain document analysis features are not availiable for filetypes (.xlsx, .pptx, .html)
+        if extension.lower() in [".xlsx", ".pptx", ".html"]:
+            analysis_features = []
+        else:
+            analysis_features = [
+                DocumentAnalysisFeature.FORMULAS,  # enable formula extraction
+                DocumentAnalysisFeature.OCR_HIGH_RESOLUTION,  # enable high resolution OCR
+                DocumentAnalysisFeature.STYLE_FONT,  # enable font style extraction
+            ]
+
+        # Extract the text using Azure Document Intelligence
+        poller = self.doc_intel_client.begin_analyze_document(
+            model_id="prebuilt-layout",
+            body=AnalyzeDocumentRequest(bytes_source=file_bytes),
+            features=analysis_features,
+            output_content_format=CONTENT_FORMAT,  # TODO: replace with "ContentFormat.MARKDOWN" when the bug is fixed
+        )
+        result: AnalyzeResult = poller.result()
+
+        # remove comments from the markdown content generated by Doc Intelligence and append to markdown string
+        markdown_text = re.sub(r"<!--.*?-->", "", result.content, flags=re.DOTALL)
+        return DocumentConverterResult(
+            title=None,
+            text_content=markdown_text,
+        )
+
+
 class FileConversionException(BaseException):
     pass
```
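The converter authenticates via `DefaultAzureCredential`, so Azure credentials must be discoverable at runtime (environment variables, managed identity, or `az login`). A minimal sketch with a placeholder endpoint:

```python
from markitdown import MarkItDown

# The endpoint is a placeholder; supply your Document Intelligence resource URL.
md = MarkItDown(docintel_endpoint="<document_intelligence_endpoint>")
# Unsupported extensions fall through to the offline converters; supported
# ones (.pdf, .docx, .pptx, images, ...) are analyzed with prebuilt-layout.
result = md.convert("test.pdf")
print(result.text_content)
```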
```diff
@@ -1012,6 +1473,8 @@ class MarkItDown:
         llm_client: Optional[Any] = None,
         llm_model: Optional[str] = None,
         style_map: Optional[str] = None,
+        exiftool_path: Optional[str] = None,
+        docintel_endpoint: Optional[str] = None,
         # Deprecated
         mlm_client: Optional[Any] = None,
         mlm_model: Optional[str] = None,
```
```diff
@@ -1021,38 +1484,34 @@ class MarkItDown:
         else:
             self._requests_session = requests_session
 
-        # Handle deprecation notices
-        #############################
-        if mlm_client is not None:
-            if llm_client is None:
-                warn(
-                    "'mlm_client' is deprecated, and was renamed 'llm_client'.",
-                    DeprecationWarning,
-                )
-                llm_client = mlm_client
-                mlm_client = None
-            else:
-                raise ValueError(
-                    "'mlm_client' is deprecated, and was renamed 'llm_client'. Do not use both at the same time. Just use 'llm_client' instead."
-                )
+        if exiftool_path is None:
+            exiftool_path = os.environ.get("EXIFTOOL_PATH")
 
-        if mlm_model is not None:
-            if llm_model is None:
-                warn(
-                    "'mlm_model' is deprecated, and was renamed 'llm_model'.",
-                    DeprecationWarning,
-                )
-                llm_model = mlm_model
-                mlm_model = None
-            else:
-                raise ValueError(
-                    "'mlm_model' is deprecated, and was renamed 'llm_model'. Do not use both at the same time. Just use 'llm_model' instead."
-                )
-        #############################
+        # Still none? Check well-known paths
+        if exiftool_path is None:
+            candidate = shutil.which("exiftool")
+            if candidate:
+                candidate = os.path.abspath(candidate)
+                if any(
+                    d == os.path.dirname(candidate)
+                    for d in [
+                        "/usr/bin",
+                        "/usr/local/bin",
+                        "/opt",
+                        "/opt/bin",
+                        "/opt/local/bin",
+                        "/opt/homebrew/bin",
+                        "C:\\Windows\\System32",
+                        "C:\\Program Files",
+                        "C:\\Program Files (x86)",
+                    ]
+                ):
+                    exiftool_path = candidate
 
         self._llm_client = llm_client
         self._llm_model = llm_model
         self._style_map = style_map
+        self._exiftool_path = exiftool_path
 
         self._page_converters: List[DocumentConverter] = []
```
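A sketch of the three ways to point MarkItDown at exiftool after this change (it mirrors the updated exiftool tests; the environment-variable value is an example path):

```python
import os
import shutil
from markitdown import MarkItDown

# 1) Explicit argument
md = MarkItDown(exiftool_path=shutil.which("exiftool"))

# 2) EXIFTOOL_PATH environment variable, read when no argument is given
os.environ["EXIFTOOL_PATH"] = "/usr/local/bin/exiftool"  # example path
md = MarkItDown()

# 3) Neither set: well-known install directories are probed automatically.
md = MarkItDown()
```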
```diff
@@ -1061,24 +1520,34 @@ class MarkItDown:
         # To this end, the most specific converters should appear below the most generic converters
         self.register_page_converter(PlainTextConverter())
         self.register_page_converter(HtmlConverter())
+        self.register_page_converter(RSSConverter())
         self.register_page_converter(WikipediaConverter())
         self.register_page_converter(YouTubeConverter())
         self.register_page_converter(BingSerpConverter())
         self.register_page_converter(DocxConverter())
         self.register_page_converter(XlsxConverter())
+        self.register_page_converter(XlsConverter())
         self.register_page_converter(PptxConverter())
         self.register_page_converter(WavConverter())
         self.register_page_converter(Mp3Converter())
         self.register_page_converter(ImageConverter())
+        self.register_page_converter(IpynbConverter())
         self.register_page_converter(PdfConverter())
         self.register_page_converter(ZipConverter())
+        self.register_page_converter(OutlookMsgConverter())
+
+        # Register Document Intelligence converter at the top of the stack if endpoint is provided
+        if docintel_endpoint is not None:
+            self.register_page_converter(
+                DocumentIntelligenceConverter(endpoint=docintel_endpoint)
+            )
 
     def convert(
-        self, source: Union[str, requests.Response], **kwargs: Any
+        self, source: Union[str, requests.Response, Path], **kwargs: Any
     ) -> DocumentConverterResult:  # TODO: deal with kwargs
         """
         Args:
-        - source: can be a string representing a path or url, or a requests.response object
+        - source: can be a string representing a path either as string pathlib path object or url, or a requests.response object
         - extension: specifies the file extension to use when interpreting the file. If None, infer from source (path, uri, content-type, etc.)
         """
```
```diff
@@ -1095,10 +1564,14 @@ class MarkItDown:
         # Request response
         elif isinstance(source, requests.Response):
             return self.convert_response(source, **kwargs)
+        elif isinstance(source, Path):
+            return self.convert_local(source, **kwargs)
 
     def convert_local(
-        self, path: str, **kwargs: Any
+        self, path: Union[str, Path], **kwargs: Any
     ) -> DocumentConverterResult:  # TODO: deal with kwargs
+        if isinstance(path, Path):
+            path = str(path)
         # Prepare a list of extensions to try (in order of priority)
         ext = kwargs.get("file_extension")
         extensions = [ext] if ext is not None else []
```
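`convert` now accepts `pathlib.Path` objects in addition to strings and `requests.Response`:

```python
from pathlib import Path
from markitdown import MarkItDown

md = MarkItDown()
# Path objects are routed through convert_local and stringified there.
result = md.convert(Path("test.xlsx"))
print(result.text_content)
```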
```diff
@@ -1228,12 +1701,15 @@ class MarkItDown:
             if "llm_model" not in _kwargs and self._llm_model is not None:
                 _kwargs["llm_model"] = self._llm_model
 
-            # Add the list of converters for nested processing
-            _kwargs["_parent_converters"] = self._page_converters
-
             if "style_map" not in _kwargs and self._style_map is not None:
                 _kwargs["style_map"] = self._style_map
 
+            if "exiftool_path" not in _kwargs and self._exiftool_path is not None:
+                _kwargs["exiftool_path"] = self._exiftool_path
+
+            # Add the list of converters for nested processing
+            _kwargs["_parent_converters"] = self._page_converters
+
             # If we hit an error log it and keep trying
             try:
                 res = converter.convert(local_path, **_kwargs)
```
```diff
@@ -1268,6 +1744,8 @@ class MarkItDown:
             ext = ext.strip()
             if ext == "":
                 return
+            if ext in extensions:
+                return
             # if ext not in extensions:
             extensions.append(ext)
```
```diff
@@ -1276,6 +1754,25 @@ class MarkItDown:
         # Use puremagic to guess
         try:
             guesses = puremagic.magic_file(path)
+
+            # Fix for: https://github.com/microsoft/markitdown/issues/222
+            # If there are no guesses, then try again after trimming leading ASCII whitespaces.
+            # ASCII whitespace characters are those byte values in the sequence b' \t\n\r\x0b\f'
+            # (space, tab, newline, carriage return, vertical tab, form feed).
+            if len(guesses) == 0:
+                with open(path, "rb") as file:
+                    while True:
+                        char = file.read(1)
+                        if not char:  # End of file
+                            break
+                        if not char.isspace():
+                            file.seek(file.tell() - 1)
+                            break
+                    try:
+                        guesses = puremagic.magic_stream(file)
+                    except puremagic.main.PureError:
+                        pass
+
             extensions = list()
             for g in guesses:
                 ext = g.extension.strip()
```
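The effect of the leading-whitespace fix, as exercised by the new test added below:

```python
import io
from markitdown import MarkItDown

md = MarkItDown()
# Leading ASCII whitespace no longer defeats file-type detection (#222).
result = md.convert_stream(io.BytesIO(b" \n\n\n<html><body><h1>Test</h1></body></html>"))
assert "# Test" in result.text_content
```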
**src/markitdown/py.typed** (new empty file, 0 lines)
**tests/test_files/test.json** (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+{
+    "key1": "string_value",
+    "key2": 1234,
+    "key3": [
+        "list_value1",
+        "list_value2"
+    ],
+    "5b64c88c-b3c3-4510-bcb8-da0b200602d8": "uuid_key",
+    "uuid_value": "9700dc99-6685-40b4-9a3a-5e406dcb37f3"
+}
```
**tests/test_files/test.xls** (new binary file; not shown)
**tests/test_files/test_notebook.ipynb** (new file, 89 lines)

```diff
@@ -0,0 +1,89 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "0f61db80",
+   "metadata": {},
+   "source": [
+    "# Test Notebook"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "3f2a5bbd",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "markitdown\n"
+     ]
+    }
+   ],
+   "source": [
+    "print('markitdown')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9b9c0468",
+   "metadata": {},
+   "source": [
+    "## Code Cell Below"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "37d8088a",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "42\n"
+     ]
+    }
+   ],
+   "source": [
+    "# comment in code\n",
+    "print(42)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2e3177bd",
+   "metadata": {},
+   "source": [
+    "End\n",
+    "\n",
+    "---"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.8"
+  },
+  "title": "Test Notebook Title"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
```
**tests/test_files/test_outlook_msg.msg** (new binary file; not shown)
**tests/test_files/test_rss.xml** (new file, 1 line; diff suppressed because one or more lines are too long)
**tests/test_markitdown.py**

```diff
@@ -6,8 +6,6 @@ import shutil
 import pytest
 import requests
 
-from warnings import catch_warnings, resetwarnings
-
 from markitdown import MarkItDown
 
 skip_remote = (
```
```diff
@@ -54,6 +52,12 @@ XLSX_TEST_STRINGS = [
     "affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
 ]
 
+XLS_TEST_STRINGS = [
+    "## 09060124-b5e7-4717-9d07-3c046eb",
+    "6ff4173b-42a5-4784-9b19-f49caff4d93d",
+    "affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
+]
+
 DOCX_TEST_STRINGS = [
     "314b0a30-5b04-470b-b9f7-eed2c2bec74a",
     "49e168b7-d2ae-407f-a055-2167576f39a1",
```
```diff
@@ -63,6 +67,15 @@ DOCX_TEST_STRINGS = [
     "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
 ]
 
+MSG_TEST_STRINGS = [
+    "# Email Message",
+    "**From:** test.sender@example.com",
+    "**To:** test.recipient@example.com",
+    "**Subject:** Test Email Message",
+    "## Content",
+    "This is the body of the test email message",
+]
+
 DOCX_COMMENT_TEST_STRINGS = [
     "314b0a30-5b04-470b-b9f7-eed2c2bec74a",
     "49e168b7-d2ae-407f-a055-2167576f39a1",
```
```diff
@@ -90,6 +103,13 @@ BLOG_TEST_STRINGS = [
     "an example where high cost can easily prevent a generic complex",
 ]
 
+
+RSS_TEST_STRINGS = [
+    "The Official Microsoft Blog",
+    "In the case of AI, it is absolutely true that the industry is moving incredibly fast",
+]
+
+
 WIKIPEDIA_TEST_URL = "https://en.wikipedia.org/wiki/Microsoft"
 WIKIPEDIA_TEST_STRINGS = [
     "Microsoft entered the operating system (OS) business in 1980 with its own version of [Unix]",
```
```diff
@@ -123,6 +143,22 @@ LLM_TEST_STRINGS = [
     "5bda1dd6",
 ]
 
+JSON_TEST_STRINGS = [
+    "5b64c88c-b3c3-4510-bcb8-da0b200602d8",
+    "9700dc99-6685-40b4-9a3a-5e406dcb37f3",
+]
+
+
+# --- Helper Functions ---
+def validate_strings(result, expected_strings, exclude_strings=None):
+    """Validate presence or absence of specific strings."""
+    text_content = result.text_content.replace("\\", "")
+    for string in expected_strings:
+        assert string in text_content
+    if exclude_strings:
+        for string in exclude_strings:
+            assert string not in text_content
+
 
 @pytest.mark.skipif(
     skip_remote,
```
```diff
@@ -156,79 +192,82 @@ def test_markitdown_local() -> None:
 
     # Test XLSX processing
     result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.xlsx"))
-    for test_string in XLSX_TEST_STRINGS:
+    validate_strings(result, XLSX_TEST_STRINGS)
+
+    # Test XLS processing
+    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.xls"))
+    for test_string in XLS_TEST_STRINGS:
         text_content = result.text_content.replace("\\", "")
         assert test_string in text_content
 
     # Test DOCX processing
     result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.docx"))
-    for test_string in DOCX_TEST_STRINGS:
-        text_content = result.text_content.replace("\\", "")
-        assert test_string in text_content
+    validate_strings(result, DOCX_TEST_STRINGS)
 
     # Test DOCX processing, with comments
     result = markitdown.convert(
         os.path.join(TEST_FILES_DIR, "test_with_comment.docx"),
         style_map="comment-reference => ",
     )
-    for test_string in DOCX_COMMENT_TEST_STRINGS:
-        text_content = result.text_content.replace("\\", "")
-        assert test_string in text_content
+    validate_strings(result, DOCX_COMMENT_TEST_STRINGS)
 
     # Test DOCX processing, with comments and setting style_map on init
     markitdown_with_style_map = MarkItDown(style_map="comment-reference => ")
     result = markitdown_with_style_map.convert(
         os.path.join(TEST_FILES_DIR, "test_with_comment.docx")
     )
-    for test_string in DOCX_COMMENT_TEST_STRINGS:
-        text_content = result.text_content.replace("\\", "")
-        assert test_string in text_content
+    validate_strings(result, DOCX_COMMENT_TEST_STRINGS)
 
     # Test PPTX processing
     result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.pptx"))
-    for test_string in PPTX_TEST_STRINGS:
-        text_content = result.text_content.replace("\\", "")
-        assert test_string in text_content
+    validate_strings(result, PPTX_TEST_STRINGS)
 
     # Test HTML processing
     result = markitdown.convert(
         os.path.join(TEST_FILES_DIR, "test_blog.html"), url=BLOG_TEST_URL
     )
-    for test_string in BLOG_TEST_STRINGS:
-        text_content = result.text_content.replace("\\", "")
-        assert test_string in text_content
+    validate_strings(result, BLOG_TEST_STRINGS)
 
     # Test ZIP file processing
     result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_files.zip"))
-    for test_string in DOCX_TEST_STRINGS:
-        text_content = result.text_content.replace("\\", "")
-        assert test_string in text_content
+    validate_strings(result, XLSX_TEST_STRINGS)
 
     # Test Wikipedia processing
     result = markitdown.convert(
         os.path.join(TEST_FILES_DIR, "test_wikipedia.html"), url=WIKIPEDIA_TEST_URL
     )
-    text_content = result.text_content.replace("\\", "")
-    for test_string in WIKIPEDIA_TEST_EXCLUDES:
-        assert test_string not in text_content
-    for test_string in WIKIPEDIA_TEST_STRINGS:
-        assert test_string in text_content
+    validate_strings(result, WIKIPEDIA_TEST_STRINGS, WIKIPEDIA_TEST_EXCLUDES)
 
     # Test Bing processing
     result = markitdown.convert(
         os.path.join(TEST_FILES_DIR, "test_serp.html"), url=SERP_TEST_URL
     )
-    text_content = result.text_content.replace("\\", "")
-    for test_string in SERP_TEST_EXCLUDES:
-        assert test_string not in text_content
-    for test_string in SERP_TEST_STRINGS:
-        assert test_string in text_content
+    validate_strings(result, SERP_TEST_STRINGS, SERP_TEST_EXCLUDES)
+
+    # Test RSS processing
+    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_rss.xml"))
+    text_content = result.text_content.replace("\\", "")
+    for test_string in RSS_TEST_STRINGS:
+        assert test_string in text_content
 
     ## Test non-UTF-8 encoding
     result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_mskanji.csv"))
-    text_content = result.text_content.replace("\\", "")
-    for test_string in CSV_CP932_TEST_STRINGS:
-        assert test_string in text_content
+    validate_strings(result, CSV_CP932_TEST_STRINGS)
+
+    # Test MSG (Outlook email) processing
+    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_outlook_msg.msg"))
+    validate_strings(result, MSG_TEST_STRINGS)
+
+    # Test JSON processing
+    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.json"))
+    validate_strings(result, JSON_TEST_STRINGS)
+
+    # Test input with leading blank characters
+    input_data = b" \n\n\n<html><body><h1>Test</h1></body></html>"
+    result = markitdown.convert_stream(io.BytesIO(input_data))
+    assert "# Test" in result.text_content
```
@pytest.mark.skipif(
|
||||
@@ -236,47 +275,21 @@ def test_markitdown_local() -> None:
|
||||
reason="do not run if exiftool is not installed",
|
||||
)
|
||||
def test_markitdown_exiftool() -> None:
|
||||
markitdown = MarkItDown()
|
||||
|
||||
# Test JPG metadata processing
|
||||
# Test explicitly setting the location of exiftool
|
||||
which_exiftool = shutil.which("exiftool")
|
||||
markitdown = MarkItDown(exiftool_path=which_exiftool)
|
||||
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
|
||||
for key in JPG_TEST_EXIFTOOL:
|
||||
target = f"{key}: {JPG_TEST_EXIFTOOL[key]}"
|
||||
assert target in result.text_content
|
||||
|
||||
|
||||
def test_markitdown_deprecation() -> None:
|
||||
try:
|
||||
with catch_warnings(record=True) as w:
|
||||
test_client = object()
|
||||
markitdown = MarkItDown(mlm_client=test_client)
|
||||
assert len(w) == 1
|
||||
assert w[0].category is DeprecationWarning
|
||||
assert markitdown._llm_client == test_client
|
||||
finally:
|
||||
resetwarnings()
|
||||
|
||||
try:
|
||||
with catch_warnings(record=True) as w:
|
||||
markitdown = MarkItDown(mlm_model="gpt-4o")
|
||||
assert len(w) == 1
|
||||
assert w[0].category is DeprecationWarning
|
||||
assert markitdown._llm_model == "gpt-4o"
|
||||
finally:
|
||||
resetwarnings()
|
||||
|
||||
try:
|
||||
test_client = object()
|
||||
markitdown = MarkItDown(mlm_client=test_client, llm_client=test_client)
|
||||
assert False
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
try:
|
||||
markitdown = MarkItDown(mlm_model="gpt-4o", llm_model="gpt-4o")
|
||||
assert False
|
||||
except ValueError:
|
||||
pass
|
||||
# Test setting the exiftool path through an environment variable
|
||||
os.environ["EXIFTOOL_PATH"] = which_exiftool
|
||||
markitdown = MarkItDown()
|
||||
result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
|
||||
for key in JPG_TEST_EXIFTOOL:
|
||||
target = f"{key}: {JPG_TEST_EXIFTOOL[key]}"
|
||||
assert target in result.text_content
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
@@ -300,8 +313,7 @@ def test_markitdown_llm() -> None:
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""Runs this file's tests from the command line."""
|
||||
test_markitdown_remote()
|
||||
test_markitdown_local()
|
||||
# test_markitdown_remote()
|
||||
# test_markitdown_local()
|
||||
test_markitdown_exiftool()
|
||||
test_markitdown_deprecation()
|
||||
test_markitdown_llm()
|
||||
# test_markitdown_llm()
|
||||
|
||||