chore: update to head
Parent: 11ab04bfb4
Commit: 767b8d611d
4 changed files with 72 additions and 1898 deletions
@@ -33,15 +33,68 @@ ACCEPTED_XLS_MIME_TYPE_PREFIXES = [
 ACCEPTED_XLS_FILE_EXTENSIONS = [".xls"]


-class XlsxConverter(DocumentConverter):
-    """
-    Converts XLSX files to Markdown, with each sheet presented as a separate Markdown table.
-    """
+class ExcelConverterBase(DocumentConverter):
+    """Base class for Excel-like converters"""

     def __init__(self):
         super().__init__()
         self._html_converter = HtmlConverter()

+    def _clean_colname(self, colname: Any) -> Any:
+        # Remove Pandas header placeholders
+        if isinstance(colname, str) and colname.startswith("Unnamed:"):
+            return None
+        return colname
+
+    def _convert_excel(
+        self,
+        file_stream: BinaryIO,
+        stream_info: StreamInfo,
+        engine: str,
+        na_rep: Any = "",
+        remove_header_placeholders: bool = True,
+        drop_empty_cols: bool = False,
+        drop_empty_rows: bool = False,
+        **kwargs: Any,
+    ) -> DocumentConverterResult:
+        sheets = pd.read_excel(file_stream, sheet_name=None, engine=engine)
+        md_content = ""
+        for name, sheet in sheets.items():
+            md_content += f"## {name}\n"
+
+            if remove_header_placeholders:
+                sheet = sheet.rename(columns=lambda col: self._clean_colname(col))
+
+            if drop_empty_cols:
+                # Also consider headers to be part of the column
+                sheet = sheet.loc[:, sheet.notna().any() | sheet.columns.notna()]
+
+            if drop_empty_rows:
+                sheet = sheet.dropna(axis=0, how="all")
+
+            # Coerce any cell that evaluates to `pd.isna(c) == True` to `na_rep`
+            # More reliable than using `.to_html(na_rep=...)`: https://github.com/pandas-dev/pandas/issues/11953
+            # Because the latter does not replace NaT's
+            with pd.option_context("future.no_silent_downcasting", True):
+                sheet = sheet.fillna(na_rep, axis=1).infer_objects(copy=False)
+                sheet.columns = sheet.columns.fillna(na_rep)
+
+            html_content = sheet.to_html(index=False, na_rep=na_rep)
+            md_content += (
+                self._html_converter.convert_string(
+                    html_content, **kwargs
+                ).markdown.strip()
+                + "\n\n"
+            )
+
+        return DocumentConverterResult(markdown=md_content.strip())
+
+
+class XlsxConverter(ExcelConverterBase):
+    """
+    Converts XLSX files to Markdown, with each sheet presented as a separate Markdown table.
+    """
+
     def accepts(
         self,
         file_stream: BinaryIO,

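A minimal sketch (not part of this commit) of the pandas behaviour that the comment inside _convert_excel cites: `to_html(na_rep=...)` does not substitute `na_rep` for NaT cells (pandas-dev/pandas#11953), which is why the new code fills NA-like values first and only then renders the table. The DataFrame below is an assumed example; the option and fillna call mirror the hunk above.

import pandas as pd

df = pd.DataFrame({"when": [pd.Timestamp("2024-03-14"), pd.NaT], "qty": [1, None]})

# Relying on to_html's na_rep alone: the NaT cell can still render as "NaT".
html_naive = df.to_html(index=False, na_rep="")

# The approach taken in _convert_excel: coerce every NA-like cell to na_rep first.
with pd.option_context("future.no_silent_downcasting", True):
    filled = df.fillna("", axis=1).infer_objects(copy=False)
html_filled = filled.to_html(index=False, na_rep="")

print("NaT" in html_naive, "NaT" in html_filled)  # expected: True False
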
@@ -80,30 +133,19 @@ class XlsxConverter(DocumentConverter):
                 _xlsx_dependency_exc_info[2]
             )

-        sheets = pd.read_excel(file_stream, sheet_name=None, engine="openpyxl")
-        md_content = ""
-        for s in sheets:
-            md_content += f"## {s}\n"
-            html_content = sheets[s].to_html(index=False)
-            md_content += (
-                self._html_converter.convert_string(
-                    html_content, **kwargs
-                ).markdown.strip()
-                + "\n\n"
-            )
-
-        return DocumentConverterResult(markdown=md_content.strip())
+        return self._convert_excel(
+            file_stream=file_stream,
+            stream_info=stream_info,
+            engine="openpyxl",
+            **kwargs,
+        )


-class XlsConverter(DocumentConverter):
+class XlsConverter(ExcelConverterBase):
     """
     Converts XLS files to Markdown, with each sheet presented as a separate Markdown table.
     """
-
-    def __init__(self):
-        super().__init__()
-        self._html_converter = HtmlConverter()

     def accepts(
         self,
         file_stream: BinaryIO,

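A hedged usage sketch (the fixture path is assumed from the repository's test files) of the path exercised by the hunk above: MarkItDown routes an .xlsx input to XlsxConverter, which now simply forwards to ExcelConverterBase._convert_excel with engine="openpyxl".

from markitdown import MarkItDown

md = MarkItDown()
result = md.convert("tests/test_files/test.xlsx")  # assumed fixture path

# One "## <sheet name>" heading per sheet, each followed by a Markdown table.
print(result.text_content[:200])
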
@@ -142,16 +184,9 @@ class XlsConverter(DocumentConverter):
                 _xls_dependency_exc_info[2]
             )

-        sheets = pd.read_excel(file_stream, sheet_name=None, engine="xlrd")
-        md_content = ""
-        for s in sheets:
-            md_content += f"## {s}\n"
-            html_content = sheets[s].to_html(index=False)
-            md_content += (
-                self._html_converter.convert_string(
-                    html_content, **kwargs
-                ).markdown.strip()
-                + "\n\n"
-            )
-
-        return DocumentConverterResult(markdown=md_content.strip())
+        return self._convert_excel(
+            file_stream=file_stream,
+            stream_info=stream_info,
+            engine="xlrd",
+            **kwargs,
+        )

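A hypothetical sketch (OdsConverter is not part of this commit) of what the new base class buys: an Excel-like converter only has to pick a pandas engine and delegate, exactly as the two hunks above now do for openpyxl and xlrd. It assumes the ExcelConverterBase definition from the first hunk.

class OdsConverter(ExcelConverterBase):
    """Converts OpenDocument spreadsheets via the shared sheet-to-Markdown path."""

    def convert(self, file_stream, stream_info, **kwargs):
        return self._convert_excel(
            file_stream=file_stream,
            stream_info=stream_info,
            engine="odf",  # pandas' OpenDocument engine; needs the odfpy package
            **kwargs,
        )
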
@@ -41,7 +41,7 @@ GENERAL_TEST_VECTORS = [
"6ff4173b-42a5-4784-9b19-f49caff4d93d",
|
||||
"affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
|
||||
],
|
||||
must_not_include=[],
|
||||
must_not_include=["Unnamed:", "NaN"],
|
||||
),
|
||||
FileTestVector(
|
||||
filename="test.xls",
|
||||
|
|
@@ -53,7 +53,7 @@ GENERAL_TEST_VECTORS = [
"6ff4173b-42a5-4784-9b19-f49caff4d93d",
|
||||
"affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
|
||||
],
|
||||
must_not_include=[],
|
||||
must_not_include=["Unnamed:", "NaN"],
|
||||
),
|
||||
FileTestVector(
|
||||
filename="test.pptx",
|
||||
|
|
|
|||
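A minimal sketch of what the new must_not_include entries guard against: pandas labels blank header cells "Unnamed: <n>", and unfilled cells would otherwise surface as "NaN"/"NaT" in the rendered table. The helper below mirrors ExcelConverterBase._clean_colname from the first hunk, which maps those placeholders to None so they end up as na_rep instead.

def _clean_colname(colname):
    # Mirrors ExcelConverterBase._clean_colname from the hunk above.
    if isinstance(colname, str) and colname.startswith("Unnamed:"):
        return None
    return colname

print([_clean_colname(c) for c in ["Name", "Unnamed: 1", "Unnamed: 2"]])
# expected: ['Name', None, None]
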
File diff suppressed because it is too large
@@ -1,312 +0,0 @@
#!/usr/bin/env python3 -m pytest
import io
import os
import shutil

import pytest
import requests

from warnings import catch_warnings, resetwarnings

from markitdown import MarkItDown

skip_remote = (
    True if os.environ.get("GITHUB_ACTIONS") else False
)  # Don't run these tests in CI


# Don't run the llm tests without a key and the client library
skip_llm = False if os.environ.get("OPENAI_API_KEY") else True
try:
    import openai
except ModuleNotFoundError:
    skip_llm = True

# Skip exiftool tests if not installed
skip_exiftool = shutil.which("exiftool") is None

TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "test_files")

JPG_TEST_EXIFTOOL = {
    "Author": "AutoGen Authors",
    "Title": "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
    "Description": "AutoGen enables diverse LLM-based applications",
    "ImageSize": "1615x1967",
    "DateTimeOriginal": "2024:03:14 22:10:00",
}

PDF_TEST_URL = "https://arxiv.org/pdf/2308.08155v2.pdf"
PDF_TEST_STRINGS = [
    "While there is contemporaneous exploration of multi-agent approaches"
]

YOUTUBE_TEST_URL = "https://www.youtube.com/watch?v=V2qZ_lgxTzg"
YOUTUBE_TEST_STRINGS = [
    "## AutoGen FULL Tutorial with Python (Step-By-Step)",
    "This is an intermediate tutorial for installing and using AutoGen locally",
    "PT15M4S",
    "the model we're going to be using today is GPT 3.5 turbo",  # From the transcript
]

XLSX_TEST_STRINGS = [
    "## 09060124-b5e7-4717-9d07-3c046eb",
    "6ff4173b-42a5-4784-9b19-f49caff4d93d",
    "affc7dad-52dc-4b98-9b5d-51e65d8a8ad0",
]

XLSX_TEST_EXCLUDES = ["Unnamed:", "NaN"]


DOCX_TEST_STRINGS = [
    "314b0a30-5b04-470b-b9f7-eed2c2bec74a",
    "49e168b7-d2ae-407f-a055-2167576f39a1",
    "## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
    "# Abstract",
    "# Introduction",
    "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
]

DOCX_COMMENT_TEST_STRINGS = [
    "314b0a30-5b04-470b-b9f7-eed2c2bec74a",
    "49e168b7-d2ae-407f-a055-2167576f39a1",
    "## d666f1f7-46cb-42bd-9a39-9a39cf2a509f",
    "# Abstract",
    "# Introduction",
    "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
    "This is a test comment. 12df-321a",
    "Yet another comment in the doc. 55yiyi-asd09",
]

PPTX_TEST_STRINGS = [
    "2cdda5c8-e50e-4db4-b5f0-9722a649f455",
    "04191ea8-5c73-4215-a1d3-1cfb43aaaf12",
    "44bf7d06-5e7a-4a40-a2e1-a2e42ef28c8a",
    "1b92870d-e3b5-4e65-8153-919f4ff45592",
    "AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation",
    "a3f6004b-6f4f-4ea8-bee3-3741f4dc385f",  # chart title
    "2003",  # chart value
]

BLOG_TEST_URL = "https://microsoft.github.io/autogen/blog/2023/04/21/LLM-tuning-math"
BLOG_TEST_STRINGS = [
    "Large language models (LLMs) are powerful tools that can generate natural language texts for various applications, such as chatbots, summarization, translation, and more. GPT-4 is currently the state of the art LLM in the world. Is model selection irrelevant? What about inference parameters?",
    "an example where high cost can easily prevent a generic complex",
]


RSS_TEST_STRINGS = [
    "The Official Microsoft Blog",
    "In the case of AI, it is absolutely true that the industry is moving incredibly fast",
]


WIKIPEDIA_TEST_URL = "https://en.wikipedia.org/wiki/Microsoft"
WIKIPEDIA_TEST_STRINGS = [
    "Microsoft entered the operating system (OS) business in 1980 with its own version of [Unix]",
    'Microsoft was founded by [Bill Gates](/wiki/Bill_Gates "Bill Gates")',
]
WIKIPEDIA_TEST_EXCLUDES = [
    "You are encouraged to create an account and log in",
    "154 languages",
    "move to sidebar",
]

SERP_TEST_URL = "https://www.bing.com/search?q=microsoft+wikipedia"
SERP_TEST_STRINGS = [
    "](https://en.wikipedia.org/wiki/Microsoft",
    "Microsoft Corporation is **an American multinational corporation and technology company headquartered** in Redmond",
    "1995–2007: Foray into the Web, Windows 95, Windows XP, and Xbox",
]
SERP_TEST_EXCLUDES = [
    "https://www.bing.com/ck/a?!&&p=",
    "data:image/svg+xml,%3Csvg%20width%3D",
]

CSV_CP932_TEST_STRINGS = [
    "名前,年齢,住所",
    "佐藤太郎,30,東京",
    "三木英子,25,大阪",
    "髙橋淳,35,名古屋",
]

LLM_TEST_STRINGS = [
    "5bda1dd6",
]


# --- Helper Functions ---
def validate_strings(result, expected_strings, exclude_strings=None):
    """Validate presence or absence of specific strings."""
    text_content = result.text_content.replace("\\", "")
    for string in expected_strings:
        assert string in text_content
    if exclude_strings:
        for string in exclude_strings:
            assert string not in text_content


@pytest.mark.skipif(
    skip_remote,
    reason="do not run tests that query external urls",
)
def test_markitdown_remote() -> None:
    markitdown = MarkItDown()

    # By URL
    result = markitdown.convert(PDF_TEST_URL)
    for test_string in PDF_TEST_STRINGS:
        assert test_string in result.text_content

    # By stream
    response = requests.get(PDF_TEST_URL)
    result = markitdown.convert_stream(
        io.BytesIO(response.content), file_extension=".pdf", url=PDF_TEST_URL
    )
    for test_string in PDF_TEST_STRINGS:
        assert test_string in result.text_content

    # Youtube
    # TODO: This test randomly fails for some reason. Haven't been able to repro it yet. Disabling until I can debug the issue
    # result = markitdown.convert(YOUTUBE_TEST_URL)
    # for test_string in YOUTUBE_TEST_STRINGS:
    #     assert test_string in result.text_content


def test_markitdown_local() -> None:
    markitdown = MarkItDown()

    # Test XLSX processing
    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.xlsx"))
    validate_strings(result, XLSX_TEST_STRINGS, XLSX_TEST_EXCLUDES)

    # Test DOCX processing
    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.docx"))
    validate_strings(result, DOCX_TEST_STRINGS)

    # Test DOCX processing, with comments
    result = markitdown.convert(
        os.path.join(TEST_FILES_DIR, "test_with_comment.docx"),
        style_map="comment-reference => ",
    )
    validate_strings(result, DOCX_COMMENT_TEST_STRINGS)

    # Test DOCX processing, with comments and setting style_map on init
    markitdown_with_style_map = MarkItDown(style_map="comment-reference => ")
    result = markitdown_with_style_map.convert(
        os.path.join(TEST_FILES_DIR, "test_with_comment.docx")
    )
    validate_strings(result, DOCX_COMMENT_TEST_STRINGS)

    # Test PPTX processing
    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.pptx"))
    validate_strings(result, PPTX_TEST_STRINGS)

    # Test HTML processing
    result = markitdown.convert(
        os.path.join(TEST_FILES_DIR, "test_blog.html"), url=BLOG_TEST_URL
    )
    validate_strings(result, BLOG_TEST_STRINGS)

    # Test ZIP file processing
    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_files.zip"))
    validate_strings(result, XLSX_TEST_STRINGS)

    # Test Wikipedia processing
    result = markitdown.convert(
        os.path.join(TEST_FILES_DIR, "test_wikipedia.html"), url=WIKIPEDIA_TEST_URL
    )
    text_content = result.text_content.replace("\\", "")
    validate_strings(result, WIKIPEDIA_TEST_STRINGS, WIKIPEDIA_TEST_EXCLUDES)

    # Test Bing processing
    result = markitdown.convert(
        os.path.join(TEST_FILES_DIR, "test_serp.html"), url=SERP_TEST_URL
    )
    text_content = result.text_content.replace("\\", "")
    validate_strings(result, SERP_TEST_STRINGS, SERP_TEST_EXCLUDES)

    # Test RSS processing
    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_rss.xml"))
    text_content = result.text_content.replace("\\", "")
    for test_string in RSS_TEST_STRINGS:
        assert test_string in text_content

    ## Test non-UTF-8 encoding
    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_mskanji.csv"))
    validate_strings(result, CSV_CP932_TEST_STRINGS)


@pytest.mark.skipif(
    skip_exiftool,
    reason="do not run if exiftool is not installed",
)
def test_markitdown_exiftool() -> None:
    markitdown = MarkItDown()

    # Test JPG metadata processing
    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test.jpg"))
    for key in JPG_TEST_EXIFTOOL:
        target = f"{key}: {JPG_TEST_EXIFTOOL[key]}"
        assert target in result.text_content


def test_markitdown_deprecation() -> None:
    try:
        with catch_warnings(record=True) as w:
            test_client = object()
            markitdown = MarkItDown(mlm_client=test_client)
            assert len(w) == 1
            assert w[0].category is DeprecationWarning
            assert markitdown._llm_client == test_client
    finally:
        resetwarnings()

    try:
        with catch_warnings(record=True) as w:
            markitdown = MarkItDown(mlm_model="gpt-4o")
            assert len(w) == 1
            assert w[0].category is DeprecationWarning
            assert markitdown._llm_model == "gpt-4o"
    finally:
        resetwarnings()

    try:
        test_client = object()
        markitdown = MarkItDown(mlm_client=test_client, llm_client=test_client)
        assert False
    except ValueError:
        pass

    try:
        markitdown = MarkItDown(mlm_model="gpt-4o", llm_model="gpt-4o")
        assert False
    except ValueError:
        pass


@pytest.mark.skipif(
    skip_llm,
    reason="do not run llm tests without a key",
)
def test_markitdown_llm() -> None:
    client = openai.OpenAI()
    markitdown = MarkItDown(llm_client=client, llm_model="gpt-4o")

    result = markitdown.convert(os.path.join(TEST_FILES_DIR, "test_llm.jpg"))

    for test_string in LLM_TEST_STRINGS:
        assert test_string in result.text_content

    # This is not super precise. It would also accept "red square", "blue circle",
    # "the square is not blue", etc. But it's sufficient for this test.
    for test_string in ["red", "circle", "blue", "square"]:
        assert test_string in result.text_content.lower()


if __name__ == "__main__":
    """Runs this file's tests from the command line."""
    test_markitdown_remote()
    test_markitdown_local()
    test_markitdown_exiftool()
    test_markitdown_deprecation()
    test_markitdown_llm()