This paper presents a framework to restore the 2D content printed
on documents in the presence of geometric distortion and nonuniform
illumination. Compared with text-based document imaging approaches
that correct distortion to a level necessary to obtain sufficiently
readable text or to facilitate optical character recognition (OCR),
our work targets nontextual documents where the original printed
content is desired. To achieve this goal, our framework acquires
a 3D scan of the document's surface together with a high-resolution
image. Conformal mapping is used to rectify geometric distortion
by mapping the 3D surface back to a plane while minimizing angular
distortion. This conformal "deskewing" assumes no parametric model
of the document's surface and is suitable for arbitrary distortions.
Illumination correction is performed by using the 3D shape to distinguish
content gradient edges from illumination gradient edges in the high-resolution
image. Integration is performed using only the content edges to obtain
a reflectance image with significantly less illumination artifacts.
This approach makes no assumptions about light sources and their
positions. The results from the geometric and photometric correction
are combined to produce the final output.
%0 Journal Article
%1 Brown2007
%A Brown, M.S.
%A Sun, Mingxuan
%A Yang, Ruigang
%A Yun, Lin
%A Seales, W.B.
%D 2007
%I IEEE Computer Society
%K 3D artifact, character content content, distorted distortion, document document, edge, geometric gradient illumination illumination, image image, imaging nonuniform optical printed processing, readable recognition, recognition2D reflectance restoration, scan, shape, surface text, text-based
%N 11
%P 1904-1916
%R 10.1109/TPAMI.2007.1118
%T Restoring 2D Content from Distorted Documents
%V 29
%X This paper presents a framework to restore the 2D content printed
on documents in the presence of geometric distortion and nonuniform
illumination. Compared with text-based document imaging approaches
that correct distortion to a level necessary to obtain sufficiently
readable text or to facilitate optical character recognition (OCR),
our work targets nontextual documents where the original printed
content is desired. To achieve this goal, our framework acquires
a 3D scan of the document's surface together with a high-resolution
image. Conformal mapping is used to rectify geometric distortion
by mapping the 3D surface back to a plane while minimizing angular
distortion. This conformal "deskewing" assumes no parametric model
of the document's surface and is suitable for arbitrary distortions.
Illumination correction is performed by using the 3D shape to distinguish
content gradient edges from illumination gradient edges in the high-resolution
image. Integration is performed using only the content edges to obtain
a reflectance image with significantly less illumination artifacts.
This approach makes no assumptions about light sources and their
positions. The results from the geometric and photometric correction
are combined to produce the final output.
@article{Brown2007,
  abstract     = {This paper presents a framework to restore the 2D content printed
on documents in the presence of geometric distortion and nonuniform
illumination. Compared with text-based document imaging approaches
that correct distortion to a level necessary to obtain sufficiently
readable text or to facilitate optical character recognition (OCR),
our work targets nontextual documents where the original printed
content is desired. To achieve this goal, our framework acquires
a 3D scan of the document's surface together with a high-resolution
image. Conformal mapping is used to rectify geometric distortion
by mapping the 3D surface back to a plane while minimizing angular
distortion. This conformal ``deskewing'' assumes no parametric model
of the document's surface and is suitable for arbitrary distortions.
Illumination correction is performed by using the 3D shape to distinguish
content gradient edges from illumination gradient edges in the high-resolution
image. Integration is performed using only the content edges to obtain
a reflectance image with significantly less illumination artifacts.
This approach makes no assumptions about light sources and their
positions. The results from the geometric and photometric correction
are combined to produce the final output.},
  added-at     = {2011-03-27T19:35:34.000+0200},
  author       = {Brown, M. S. and Sun, Mingxuan and Yang, Ruigang and Yun, Lin and Seales, W. B.},
  biburl       = {https://www.bibsonomy.org/bibtex/2d4f954d7f23e620a021942ff8b72f738/cocus},
  doi          = {10.1109/TPAMI.2007.1118},
  file         = {:./04302757.pdf:PDF},
  interhash    = {6bc6bfef066180a824ab57d55d385330},
  intrahash    = {d4f954d7f23e620a021942ff8b72f738},
  issn         = {0162-8828},
  journaltitle = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence},
  keywords     = {3D artifact, character content content, distorted distortion, document document, edge, geometric gradient illumination illumination, image image, imaging nonuniform optical printed processing, readable recognition, recognition2D reflectance restoration, scan, shape, surface text, text-based},
  location     = {Los Alamitos, {CA}, {USA}},
  month        = nov,
  number       = 11,
  pages        = {1904--1916},
  publisher    = {{IEEE} Computer Society},
  timestamp    = {2011-03-27T19:35:35.000+0200},
  title        = {Restoring {2D} Content from Distorted Documents},
  volume       = 29,
  year         = 2007,
}