\@input{yenya-text_alignment.aux}
\bibstyle{splncs03}
\bibdata{pan13-notebook}
-\bibcite{chatnoir}{1}
-\@writefile{toc}{\contentsline {section}{\numberline {4}Conclusions}{5}}
+\bibcite{ententen}{1}
+\bibcite{Introduction_to_information_retrieval}{2}
+\bibcite{chatnoir}{3}
+\bibcite{suchomel_kas_12}{4}
+\bibcite{SpiderLink}{5}
+\@writefile{toc}{\contentsline {section}{\numberline {4}Conclusions}{6}}
TITLE = {{ChatNoir: A Search Engine for the ClueWeb09 Corpus}},\r
YEAR = {2012}\r
}\r
+\r
+@inproceedings{suchomel_kas_12,\r
+ added-at = {2012-10-01T11:37:58.000+0200},\r
+ author = {Suchomel, {\v S}imon and Kasprzak, Jan and Brandejs, Michal},\r
+ bibsource = {DBLP, http://dblp.uni-trier.de},\r
+ biburl = {http://www.bibsonomy.org/bibtex/261d1f12dbeffef7de955e8cfa7cec167/promisenoe},\r
+ booktitle = {CLEF (Online Working Notes/Labs/Workshop)},\r
+ editor = {Forner, Pamela and Karlgren, Jussi and Womser-Hacker, Christa},\r
+ ee = {http://www.clef-initiative.eu/documents/71612/1f71592e-ad8a-4c84-833e-46a82b44a9be},\r
+ interhash = {07a49b465de0f0a1993d99d6afb51275},\r
+ intrahash = {61d1f12dbeffef7de955e8cfa7cec167},\r
+ isbn = {978-88-904810-3-1},\r
+ keywords = {dblp},\r
+ timestamp = {2012-10-01T11:37:58.000+0200},\r
+ title = {Three Way Search Engine Queries with Multi-feature Document Comparison for Plagiarism Detection.},\r
+ year = 2012\r
+}\r
+\r
+@book{Introduction_to_information_retrieval,\r
+ abstract = {Class-tested and coherent, this textbook teaches classical and web information retrieval, including web search and the related areas of text classification and text clustering from basic concepts. It gives an up-to-date treatment of all aspects of the design and implementation of systems for gathering, indexing, and searching documents; methods for evaluating systems; and an introduction to the use of machine learning methods on text collections. All the important ideas are explained using examples and figures, making it perfect for introductory courses in information retrieval for advanced undergraduates and graduate students in computer science. Based on feedback from extensive classroom experience, the book has been carefully structured in order to make teaching more natural and effective.},\r
+ added-at = {2012-05-30T10:50:27.000+0200},\r
+ address = {Cambridge, UK},\r
+ author = {Manning, Christopher D. and Raghavan, Prabhakar and Sch{\"u}tze, Hinrich},\r
+ biburl = {http://www.bibsonomy.org/bibtex/28516d94c1f7aa1e391ddd3ace4caa23b/flint63},\r
+ file = {Cambridge University Press Product Page:http\://www.cambridge.org/9780521865715:URL;Amazon Search inside:http\://www.amazon.de/gp/reader/0521865719/:URL;Google Books:http\://books.google.de/books?isbn=978-0-521-86571-5:URL},\r
+ PAGES = {118-120},\r
+ groups = {public},\r
+ interhash = {b6954037b1d444f4afe4cad883b4d80c},\r
+ intrahash = {8516d94c1f7aa1e391ddd3ace4caa23b},\r
+ isbn = {978-0-521-86571-5},\r
+ keywords = {v1205 book ai information retrieval language processing search xml web},\r
+ publisher = {Cambridge University Press},\r
+ timestamp = {2012-05-30T10:50:27.000+0200},\r
+ title = {Introduction to Information Retrieval},\r
+ username = {flint63},\r
+ year = 2008\r
+}\r
+\r
+@MISC{ententen,\r
+ key = "{Corpus}",\r
+ title = "{Sketch Engine EnTenTen Corpus}",\r
+ howpublished = "\url{http://trac.sketchengine.co.uk/wiki/Corpora/enTenTen}",\r
+ year = "2012", \r
+}\r
+\r
+@inproceedings{SpiderLink,\r
+ author = {Suchomel, V. and Pomik{\'a}lek, J.},\r
+ booktitle = {Proceedings of the seventh Web as Corpus Workshop (WAC7)},\r
+ pages = {39-43},\r
+ editor = {Adam Kilgarriff and Serge Sharoff},\r
+ title = {Efficient Web Crawling for Large Text Corpora},\r
+ year = 2012\r
+}\r
+\r
+@INPROCEEDINGS{awfc,\r
+ author = {Sven Meyer Zu Eissen and Benno Stein},\r
+ title = {Intrinsic Plagiarism Detection},\r
+ booktitle = {Proceedings of the European Conference on Information Retrieval (ECIR-06)},\r
+ year = {2006}\r
+}\r
-This is pdfeTeXk, Version 3.141592-1.11a-2.1 (Web2C 7.5.2) (format=pdflatex 2011.8.15) 28 MAY 2013 14:44
+This is pdfeTeXk, Version 3.141592-1.11a-2.1 (Web2C 7.5.2) (format=pdflatex 2011.8.15) 28 MAY 2013 21:56
entering extended mode
%&-line parsing enabled.
**pan13-notebook.tex
\openout2 = `simon-source_retrieval.aux'.
(./simon-source_retrieval.tex
-<img/source_retrieval_process.pdf, id=14, 518.6878pt x 264.99pt>
+<img/source_retrieval_process.pdf, id=14, 518.6878pt x 256.70906pt>
File: img/source_retrieval_process.pdf Graphic file (type pdf)
<use img/source_retrieval_process.pdf>
<./img/source_retrieval_process.pdf>]
LaTeX Font Info: Font shape `T1/ptm/bx/n' in size <10> not available
(Font) Font shape `T1/ptm/b/n' tried instead on input line 49.
-) [3]
+[3]
+
+LaTeX Warning: Citation `awfc' on page 4 undefined on input line 92.
+
+) [4]
\openout2 = `yenya-text_alignment.aux'.
- (./yenya-text_alignment.tex) [4
+ (./yenya-text_alignment.tex) [5
-] (./pan13-notebook.bbl) [5
+] (./pan13-notebook.bbl) [6
]
(./pan13-notebook.aux (./simon-source_retrieval.aux)
(./yenya-text_alignment.aux)) )
Here is how much of TeX's memory you used:
- 1868 strings out of 94668
- 22666 string characters out of 1175711
- 77666 words of memory out of 1527908
- 4987 multiletter control sequences out of 10000+50000
+ 1873 strings out of 94668
+ 22750 string characters out of 1175711
+ 78682 words of memory out of 1527924
+ 4992 multiletter control sequences out of 10000+50000
47511 words of font info for 49 fonts, out of 1000000 for 2000
458 hyphenation exceptions out of 1000
29i,9n,21p,221b,226s stack positions out of 5000i,500n,6000p,200000b,40000s
- 56 PDF objects out of 300000
+ 59 PDF objects out of 300000
0 named destinations out of 131072
6 words of extra memory for PDF output out of 65536
{/export/packages/share/texlive2003/texmf/dvips/
/ucrr8a.pfb></export/packages/share/texlive2003/texmf/fonts/type1/urw/times/utm
r8a.pfb></export/packages/share/texlive2003/texmf/fonts/type1/urw/times/utmb8a.
pfb>
-Output written on pan13-notebook.pdf (5 pages, 146423 bytes).
+Output written on pan13-notebook.pdf (6 pages, 152139 bytes).
\@writefile{toc}{\contentsline {section}{\numberline {2}Source Retrieval}{2}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Source retrieval process.}}{2}}
\newlabel{fig:source_retr_process}{{1}{2}}
+\citation{suchomel_kas_12}
+\citation{Introduction_to_information_retrieval}
+\citation{ententen}
+\citation{SpiderLink}
\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Querying}{3}}
-\@writefile{toc}{\contentsline {subsubsection}{Keywords Based Queries}{3}}
-\@writefile{toc}{\contentsline {subsubsection}{Intrinsic Plagiarism Based Queries}{3}}
-\@writefile{toc}{\contentsline {subsubsection}{Paragraph Based Queries}{3}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Search Control}{3}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Result Selection}{3}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.4}Snippet Control}{3}}
+\@writefile{toc}{\contentsline {subsubsection}{Keywords Based Queries.}{3}}
+\citation{awfc}
+\citation{suchomel_kas_12}
+\@writefile{toc}{\contentsline {subsubsection}{Intrinsic Plagiarism Based Queries.}{4}}
+\@writefile{toc}{\contentsline {subsubsection}{Paragraph Based Queries.}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Search Control}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Result Selection}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.4}Snippet Control}{4}}
\@setckpt{simon-source_retrieval}{
-\setcounter{page}{4}
+\setcounter{page}{5}
\setcounter{equation}{0}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
which scales to the size of the Web. This methodology is based on the fact that we do not\r
possess enough resources to download and effectively process the whole corpus.\r
In the case of PAN 2013 competition the corpus\r
-of source documents is the ClueWeb~\footnote{\url{http://lemurproject.org/clueweb09.php/}} corpus. \r
+of source documents is the ClueWeb\footnote{\url{http://lemurproject.org/clueweb09.php/}} corpus. \r
As a document retrieval tool for the competition we utilized the ChatNoir~\cite{chatnoir} search engine which indexes the English\r
subset of the ClueWeb. \r
The reverse engineering decision process reside in creation of suitable queries on the basis of the suspicious document\r
This is the last decision phase -- what to report.\r
If there is any continuous passage of reused text detected, the result document is reported\r
and the continuous passages in the suspicious document are marked as `discovered' and no further processing\r
-of those parts is made. \r
+of those parts is done. \r
\r
\subsection{Querying}\r
Querying means to effectively utilize the search engine in order to retrieve as many relevant\r
documents as possible with the minimum amount of queries. We consider the resulting document relevant \r
if it shares some of text characteristics with the suspicious document. \r
\r
-We used 3 different types of queries~\footnote{We used similar three-way based methodology in PAN 2012 \r
-Candidate Document Retrieval subtask. However this time we completely replaced the headers based queries\r
+We used 3 different types of queries\footnote{We used similar three-way based methodology in PAN 2012 \r
+Candidate Document Retrieval subtask. However, this time we completely replaced the headers based queries\r
with paragraph based queries, since the headers based queries did not pay off in the overall process.}:\r
i) keywords based queries, ii) intrinsic plagiarism\r
based queries, and iii) paragraph based queries. Three main properties distinguish each type of query: i) Positional; ii) Phrasal; iii) Deterministic.\r
Deterministic queries for a specific suspicious document are always the same no matter how many times we run the software. \r
On the contrary, in two runs the software can create potentially different nondeterministic queries.\r
\r
-\subsubsection{Keywords Based Queries}\r
+\subsubsection{Keywords Based Queries.}\r
+The keywords based queries are composed of automatically extracted keywords from the whole suspicious document.\r
+Their purpose is to retrieve documents concerning the same theme. Two documents discussing the \r
+same theme usually share a set of overlapping keywords. Also, the combination of keywords within a\r
+query matters. \r
+As a method for automated keywords extraction, we used a frequency based approach described in~\cite{suchomel_kas_12}.\r
+The method combines term frequency analysis with TF-IDF score~\cite{Introduction_to_information_retrieval}. As a reference\r
+corpus we used English web corpus~\cite{ententen} crawled by SpiderLink~\cite{SpiderLink} in 2012 which contains 4.65 billion tokens. \r
\r
-\subsubsection{Intrinsic Plagiarism Based Queries}\r
-\subsubsection{Paragraph Based Queries}\r
-\subsection{Search Control}\r
+Each keywords based query was constructed from five top ranked keywords consecutively. Each keyword was\r
+used only in one query. Too long keywords based queries would be over-specific and would have resulted\r
+in a low recall. On the other hand, having constructed too short (one or two tokens) queries would have resulted\r
+in a low precision and also possibly low recall, since they would be too general.\r
\r
+In order to direct the search more at the highest ranked keywords we also extracted their \r
+most frequent two and three term long collocations. These were also combined into queries of 5 words.\r
+As a result, each of the 4 top ranked keywords can appear in two different queries, one formed from the keywords\r
+alone and one from the collocations. A collocation describes its keyword better than the keyword alone. \r
\r
-\subsection{Result Selection}\r
-\subsection{Snippet Control}\r
+The keywords based queries are non-positional, since they represent the whole document. They are also non-phrasal since\r
+they are constructed of tokens gathered from different parts of the text. And they are deterministic, for certain input\r
+document the extractor always returns the same keywords.\r
\r
+\subsubsection{Intrinsic Plagiarism Based Queries.}\r
+The purpose of the second type of queries is to retrieve pages which contain text similar to passages detected\r
+as different, in the manner of writing style, from other parts of the suspicious document.\r
+Such a change may point out a plagiarized passage which is intrinsically bound up with the text. \r
+We implemented vocabulary richness method which computes average word frequency class value for \r
+a given text part. The method is described in~\cite{awfc}. The problem is that generally methods\r
+based on the vocabulary statistics work better for longer texts. According to the authors, this method\r
+scales better to shorter texts than other text style detection methods. \r
+Still, its usage is in our case limited by relatively short texts. It is also difficult to determine\r
+which parts of the text to compare. Therefore we used the sliding window concept for text chunking with the \r
+same settings as described in~\cite{suchomel_kas_12}.\r
\r
+A representative sentence longer than 6 words was randomly selected from the suspicious part of the document among those that qualify.\r
+An intrinsic plagiarism based query is created from the representative sentence leaving out stop words.\r
+\r
+The intrinsic plagiarism based queries are positional. They carry the position of the representative sentence in the document.\r
+They are phrasal, since they represent a search for a specific sentence. And they are\r
+nondeterministic, because the representative sentence is selected randomly. \r
\r
\r
\r
+\subsubsection{Paragraph Based Queries.}\r
+% TODO: these queries were executed last -- probably describe this in the Search Control section;\r
+% it would be extremely difficult to detect a single sentence other way than by exhaustive searching methods\r
+\r
+\subsection{Search Control}\r
+% TODO: we do not optimize for well-formed keyword queries -- it costs more queries\r
+\r
+\r
+\subsection{Result Selection}\r
+\subsection{Snippet Control}\r
+\r
\r
+++ /dev/null
-\relax
-\@setckpt{yenya-dtext_alignment}{
-\setcounter{page}{3}
-\setcounter{equation}{0}
-\setcounter{enumi}{0}
-\setcounter{enumii}{0}
-\setcounter{enumiii}{0}
-\setcounter{enumiv}{0}
-\setcounter{footnote}{0}
-\setcounter{mpfootnote}{0}
-\setcounter{part}{0}
-\setcounter{section}{2}
-\setcounter{subsection}{0}
-\setcounter{subsubsection}{0}
-\setcounter{paragraph}{0}
-\setcounter{subparagraph}{0}
-\setcounter{figure}{0}
-\setcounter{table}{0}
-\setcounter{chapter}{1}
-\setcounter{@inst}{1}
-\setcounter{@auth}{3}
-\setcounter{auco}{3}
-\setcounter{theorem}{0}
-\setcounter{case}{0}
-\setcounter{conjecture}{0}
-\setcounter{corollary}{0}
-\setcounter{definition}{0}
-\setcounter{example}{0}
-\setcounter{exercise}{0}
-\setcounter{lemma}{0}
-\setcounter{note}{0}
-\setcounter{problem}{0}
-\setcounter{property}{0}
-\setcounter{proposition}{0}
-\setcounter{question}{0}
-\setcounter{solution}{0}
-\setcounter{remark}{0}
-}
\relax
-\@writefile{toc}{\contentsline {section}{\numberline {3}Text Alignment}{4}}
-\newlabel{text_alignment}{{3}{4}}
+\@writefile{toc}{\contentsline {section}{\numberline {3}Text Alignment}{5}}
+\newlabel{text_alignment}{{3}{5}}
\@setckpt{yenya-text_alignment}{
-\setcounter{page}{5}
+\setcounter{page}{6}
\setcounter{equation}{0}
\setcounter{enumi}{0}
\setcounter{enumii}{0}