Mirror of https://github.com/diocloid/LinkTitles.git (synced 2025-07-13 09:49:31 +02:00)
Adjust to fit MediaWiki 1.25+ extension format.
- NEW: Use the new extension format introduced by MediaWiki 1.25; the extension will no longer run with older MediaWiki versions.
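
For context: the MediaWiki 1.25+ format mentioned above registers an
extension through an extension.json manifest that is loaded with
wfLoadExtension( 'LinkTitles' ). The snippet below is only an illustrative
sketch of that mechanism; it is not the manifest from this commit, and the
hook wiring and autoload map are assumptions inferred from the files that
follow.

{
	"name": "LinkTitles",
	"type": "parserhook",
	"AutoloadClasses": {
		"LinkTitles\\Extension": "includes/LinkTitles_Extension.php",
		"LinkTitles\\Special": "includes/LinkTitles_Special.php"
	},
	"ExtensionMessagesFiles": {
		"LinkTitlesMagic": "includes/LinkTitles_Magic.php"
	},
	"Hooks": {
		"PageContentSave": "LinkTitles\\Extension::onPageContentSave",
		"InternalParseBeforeLinks": "LinkTitles\\Extension::onInternalParseBeforeLinks",
		"GetDoubleUnderscoreIDs": "LinkTitles\\Extension::onGetDoubleUnderscoreIDs"
	},
	"manifest_version": 1
}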

includes/LinkTitles_Extension.php (new file, 410 lines)
@@ -0,0 +1,410 @@
<?php
/*
 * Copyright 2012-2016 Daniel Kraus <bovender@bovender.de> ('bovender')
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */
/// @file
namespace LinkTitles;

/// Helper function for development and debugging.
/// @param $var Any variable. Raw content will be dumped to stderr.
/// @return undefined
function dump($var) {
	error_log(print_r($var, TRUE) . "\n", 3, 'php://stderr');
};
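/// Example call (illustrative only, not part of the original file):
///   \LinkTitles\dump( $someVariable ); // writes print_r output to stderr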
/// Central class of the extension. Sets up parser hooks.
/// This class contains only static functions; do not instantiate.
class Extension {
	/// A Title object for the page that is being parsed.
	private static $currentTitle;

	/// A Title object for the target page currently being examined.
	private static $targetTitle;

	/// The content object for the currently processed target page.
	/// This variable is necessary to be able to prevent loading the target
	/// content twice.
	private static $targetContent;

	/// Holds the page title of the currently processed target page
	/// as a string.
	private static $targetTitleText;

	/// Delimiter used in a regexp split operation to separate those parts
	/// of the page that should be parsed from those that should not be
	/// parsed (e.g. inside pre-existing links etc.).
	private static $delimiter;

	private static $wordStartDelim;
	private static $wordEndDelim;

	/// Setup method.
	public static function setup() {
		self::BuildDelimiters();
	}
	/// Event handler that is hooked to the PageContentSave event.
	public static function onPageContentSave( &$wikiPage, &$user, &$content, &$summary,
			$isMinor, $isWatch, $section, &$flags, &$status ) {
		if ( !$isMinor ) {
			// Serialize the content before checking for the __NOAUTOLINKS__
			// magic word, so that the check operates on defined text.
			$text = $content->getContentHandler()->serializeContent( $content );
			if ( !\MagicWord::get( 'MAG_LINKTITLES_NOAUTOLINKS' )->match( $text ) ) {
				$title = $wikiPage->getTitle();
				$newText = self::parseContent( $title, $text );
				if ( $newText != $text ) {
					$content = $content->getContentHandler()->unserializeContent( $newText );
				}
			}
		};
		return true;
	}
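	// Illustrative effect (example, not from the original source): if the
	// wiki already has a page "Foo Bar", saving another page that contains
	// the plain text "Foo Bar" stores "[[Foo Bar]]" instead.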
	/// Event handler that is hooked to the InternalParseBeforeLinks event.
	/// @param Parser $parser Parser that raised the event.
	/// @param $text Preprocessed text of the page.
	public static function onInternalParseBeforeLinks( \Parser &$parser, &$text ) {
		// If the page contains the magic word '__NOAUTOLINKS__', do not parse it.
		if ( !isset( $parser->mDoubleUnderscores[$text] ) ) {
			$text = self::parseContent( $parser->getTitle(), $text );
		}
		return true;
	}
	/// Core function of the extension; performs the actual parsing of the content.
	/// @param Title $title Title object of the page being parsed.
	/// @param $text String that holds the article content.
	/// @returns string: parsed text with links added if needed
	private static function parseContent( $title, &$text ) {
		// Configuration variables need to be defined here as globals.
		global $wgLinkTitlesPreferShortTitles;
		global $wgLinkTitlesMinimumTitleLength;
		global $wgLinkTitlesBlackList;
		global $wgLinkTitlesFirstOnly;
		global $wgLinkTitlesSmartMode;
		global $wgCapitalLinks;

		$sort_order = $wgLinkTitlesPreferShortTitles ? 'ASC' : 'DESC';
		$limit = $wgLinkTitlesFirstOnly ? 1 : -1;

		self::$currentTitle = $title;
		$newText = $text;

		// Build a blacklist of pages that are not supposed to be link
		// targets. This includes the current page.
		$blackList = str_replace( '_', ' ',
			'("' . implode( '", "', $wgLinkTitlesBlackList ) . '", "' .
			self::$currentTitle->getDbKey() . '")' );

		// Build an SQL query and fetch all page titles ordered by length
		// (shortest or longest first, depending on the sort order). Only
		// titles from 'normal' pages (namespace uid = 0) are returned.
		// Since the db may be sqlite, we need a try..catch structure
		// because sqlite does not support the CHAR_LENGTH function.
		$dbr = wfGetDB( DB_SLAVE );
		try {
			$res = $dbr->select(
				'page',
				'page_title',
				array(
					'page_namespace = 0',
					'CHAR_LENGTH(page_title) >= ' . $wgLinkTitlesMinimumTitleLength,
					'page_title NOT IN ' . $blackList,
				),
				__METHOD__,
				array( 'ORDER BY' => 'CHAR_LENGTH(page_title) ' . $sort_order )
			);
		} catch ( \Exception $e ) {
			$res = $dbr->select(
				'page',
				'page_title',
				array(
					'page_namespace = 0',
					'LENGTH(page_title) >= ' . $wgLinkTitlesMinimumTitleLength,
					'page_title NOT IN ' . $blackList,
				),
				__METHOD__,
				array( 'ORDER BY' => 'LENGTH(page_title) ' . $sort_order )
			);
		}
		// Iterate through the page titles.
		foreach ( $res as $row ) {
			self::newTarget( $row->page_title );

			// Split the page content by [[...]] groups.
			// Credits to inhan @ StackOverflow for suggesting preg_split;
			// see http://stackoverflow.com/questions/10672286
			$arr = preg_split( self::$delimiter, $newText, -1, PREG_SPLIT_DELIM_CAPTURE );

			// Escape certain special characters in the page title to prevent
			// regexp compilation errors.
			self::$targetTitleText = self::$targetTitle->getText();
			$quotedTitle = preg_quote( self::$targetTitleText, '/' );

			// Depending on the global configuration setting $wgCapitalLinks,
			// the title has to be searched for either in a strictly
			// case-sensitive way, or in a 'fuzzy' way where the first letter
			// of the title may be either case.
			if ( $wgCapitalLinks && ( $quotedTitle[0] != '\\' ) ) {
				$searchTerm = '((?i)' . $quotedTitle[0] . '(?-i)' .
					substr( $quotedTitle, 1 ) . ')';
			} else {
				$searchTerm = '(' . $quotedTitle . ')';
			}
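			// Illustrative example (not in the original source): for the
			// title "Example", the fuzzy branch yields '((?i)E(?-i)xample)',
			// which matches "Example" and "example" but not "EXAMPLE".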

			$regex = '/(?<![\:\.\@\/\?\&])' . self::$wordStartDelim .
				$searchTerm . self::$wordEndDelim . '/Su';
			for ( $i = 0; $i < count( $arr ); $i += 2 ) {
				// Even indexes will point to text that is not enclosed by brackets.
				$arr[$i] = preg_replace_callback( $regex,
					'LinkTitles\Extension::simpleModeCallback', $arr[$i], $limit, $count );
				if ( ( $limit >= 0 ) && ( $count > 0 ) ) {
					break;
				};
			};
			$newText = implode( '', $arr );

			// If smart mode is turned on, the extension will perform a second
			// pass on the page and add links with aliases where the case does
			// not match.
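			// Illustrative example (not in the original source): with a
			// target page "Foo Bar", the second pass turns the text "foo bar"
			// into the piped link "[[Foo Bar|foo bar]]".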
			if ( $wgLinkTitlesSmartMode ) {
				$arr = preg_split( self::$delimiter, $newText, -1, PREG_SPLIT_DELIM_CAPTURE );

				for ( $i = 0; $i < count( $arr ); $i += 2 ) {
					// Even indexes will point to text that is not enclosed by brackets.
					$arr[$i] = preg_replace_callback( '/(?<![\:\.\@\/\?\&])' .
						self::$wordStartDelim . '(' . $quotedTitle . ')' .
						self::$wordEndDelim . '/iuS', 'LinkTitles\Extension::smartModeCallback',
						$arr[$i], $limit, $count );
					if ( ( $limit >= 0 ) && ( $count > 0 ) ) {
						break;
					};
				};
				$newText = implode( '', $arr );
			} // $wgLinkTitlesSmartMode
		}; // foreach $res as $row
		return $newText;
	}
	/// Automatically processes a single page, given its title as a string.
	/// This function is called by the SpecialLinkTitles class and the
	/// LinkTitlesJob class.
	/// @param string $title Page title.
	/// @param RequestContext $context Current context.
	///                       If in doubt, call MediaWiki's `RequestContext::getMain()`
	///                       to obtain such an object.
	/// @returns undefined
	public static function processPage( $title, \RequestContext $context ) {
		// TODO: make this namespace-aware
		$titleObj = \Title::makeTitle( 0, $title );
		$page = \WikiPage::factory( $titleObj );
		$content = $page->getContent();
		$text = $content->getContentHandler()->serializeContent( $content );
		$newText = self::parseContent( $titleObj, $text );
		if ( $text != $newText ) {
			$content = $content->getContentHandler()->unserializeContent( $newText );
			$page->doQuickEditContent( $content,
				$context->getUser(),
				"Links to existing pages added by LinkTitles bot.", // TODO: i18n
				true // minor modification
			);
		};
	}
	/// Adds the two magic words defined by this extension to the list of
	/// 'double-underscore' terms that are automatically removed before a
	/// page is displayed.
	/// @param $doubleUnderscoreIDs Array of magic word IDs.
	/// @return true
	public static function onGetDoubleUnderscoreIDs( array &$doubleUnderscoreIDs ) {
		$doubleUnderscoreIDs[] = 'MAG_LINKTITLES_NOTARGET';
		$doubleUnderscoreIDs[] = 'MAG_LINKTITLES_NOAUTOLINKS';
		return true;
	}
	// Callback for preg_replace_callback in simple mode.
	private static function simpleModeCallback( array $matches ) {
		if ( self::checkTargetPage() ) {
			return '[[' . $matches[0] . ']]';
		} else {
			return $matches[0];
		}
	}
	// Callback function for use with preg_replace_callback.
	// This essentially performs a case-sensitive comparison of the
	// current page title and the occurrence found on the page; if
	// the cases do not match, it builds an aliased (piped) link.
	// If $wgCapitalLinks is set to true, the case of the first
	// letter is ignored by MediaWiki and we don't need to build a
	// piped link if only the case of the first letter is different.
	private static function smartModeCallback( array $matches ) {
		global $wgCapitalLinks;

		if ( $wgCapitalLinks ) {
			// With $wgCapitalLinks set to true we have a slightly more
			// complicated version of the callback than if it were false;
			// we need to ignore the first letter of the page titles, as
			// it does not matter for linking.
			if ( self::checkTargetPage() ) {
				if ( strcmp( substr( self::$targetTitleText, 1 ), substr( $matches[0], 1 ) ) == 0 ) {
					// Case-sensitive match: no need to build piped link.
					return '[[' . $matches[0] . ']]';
				} else {
					// Case-insensitive match: build piped link.
					return '[[' . self::$targetTitleText . '|' . $matches[0] . ']]';
				}
			} else {
				return $matches[0];
			}
		} else {
			// If $wgCapitalLinks is false, we can use the simple variant
			// of the callback function.
			if ( self::checkTargetPage() ) {
				if ( strcmp( self::$targetTitleText, $matches[0] ) == 0 ) {
					// Case-sensitive match: no need to build piped link.
					return '[[' . $matches[0] . ']]';
				} else {
					// Case-insensitive match: build piped link.
					return '[[' . self::$targetTitleText . '|' . $matches[0] . ']]';
				}
			} else {
				return $matches[0];
			}
		}
	}
	/// Sets member variables for the current target page.
	private static function newTarget( $title ) {
		// @todo Make this wiki namespace aware.
		self::$targetTitle = \Title::makeTitle( NS_MAIN, $title );
		self::$targetContent = null;
	}
	/// Returns the content of the current target page.
	/// This function is intended for use in preg_replace_callback callback
	/// functions, in order to load the target page content from the
	/// database only when needed.
	/// @note It is absolutely necessary that the newTarget()
	/// function is called for every new page.
	private static function getTargetContent() {
		if ( self::$targetContent === null ) {
			self::$targetContent = \WikiPage::factory(
				self::$targetTitle )->getContent();
		};
		return self::$targetContent;
	}
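	// Illustrative note (not in the original source): the first call per
	// target loads the Content object via \WikiPage::factory(); later calls
	// for the same target reuse the cached self::$targetContent.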
	/// Examines the current target page. Returns true if it may be linked;
	/// false if not. This depends on the settings
	/// $wgLinkTitlesCheckRedirect and $wgLinkTitlesEnableNoTargetMagicWord
	/// and whether the target page is a redirect or contains the
	/// __NOAUTOLINKTARGET__ magic word.
	/// @returns boolean
	private static function checkTargetPage() {
		global $wgLinkTitlesEnableNoTargetMagicWord;
		global $wgLinkTitlesCheckRedirect;

		// If checking for redirects is enabled and the target page does
		// indeed redirect to the current page, return the page title as-is
		// (unlinked).
		if ( $wgLinkTitlesCheckRedirect ) {
			$redirectTitle = self::getTargetContent()->getUltimateRedirectTarget();
			if ( $redirectTitle && $redirectTitle->equals( self::$currentTitle ) ) {
				return false;
			}
		};

		// If the magic word __NOAUTOLINKTARGET__ is enabled and the target
		// page does indeed contain this magic word, return the page title
		// as-is (unlinked).
		if ( $wgLinkTitlesEnableNoTargetMagicWord ) {
			if ( self::getTargetContent()->matchMagicWord(
					\MagicWord::get( 'MAG_LINKTITLES_NOTARGET' ) ) ) {
				return false;
			}
		};
		return true;
	}
	/// Builds the delimiter that is used in a regexp to separate
	/// text that should be parsed from text that should not be
	/// parsed (e.g. inside existing links etc.)
	private static function BuildDelimiters() {
		// Configuration variables need to be defined here as globals.
		global $wgLinkTitlesParseHeadings;
		global $wgLinkTitlesSkipTemplates;
		global $wgLinkTitlesWordStartOnly;
		global $wgLinkTitlesWordEndOnly;

		// Use unicode character properties rather than \b escape sequences
		// to detect whole words containing non-ASCII characters as well.
		// Note that this requires the use of the '/u' switch, and you need
		// to have PHP with a PCRE library that was compiled with
		// --enable-unicode-properties
		self::$wordStartDelim = $wgLinkTitlesWordStartOnly ? '(?<!\pL)' : '';
		self::$wordEndDelim = $wgLinkTitlesWordEndOnly ? '(?!\pL)' : '';

		if ( $wgLinkTitlesSkipTemplates ) {
			$templatesDelimiter = '{{[^}]+}}|';
		} else {
			// Match template names (ignoring any piped [[]] links in them)
			// along with the trailing pipe and parameter name or closing
			// braces; also match sequences of '|wordcharacters=' (without
			// spaces in them) that usually only occur as parameter names in
			// transclusions (but could also occur as wiki table cell contents).
			// TODO: Find a way to match parameter names in transclusions, but
			// not in table cells or other sequences involving a pipe character
			// and equal sign.
			$templatesDelimiter = '{{[^|]*?(?:(?:\[\[[^]]+]])?)[^|]*?(?:\|(?:\w+=)?|(?:}}))|\|\w+=|';
		}

		// Build a regular expression that will capture existing wiki links
		// ("[[...]]"), wiki headings ("= ... =", "== ... ==" etc.),
		// urls ("http://example.com", "[http://example.com]",
		// "[http://example.com Description]"), and email addresses
		// ("mail@example.com").
		// Since there is a user option to skip headings, we make this part of
		// the expression optional. Note that in order to use preg_split(), it
		// is important to have only one capturing subpattern (which precludes
		// the use of conditional subpatterns).
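		// Illustrative example (not in the original source): with a single
		// capturing group, preg_split(..., PREG_SPLIT_DELIM_CAPTURE) alternates
		// plain text (even indexes) and excluded matches (odd indexes), e.g.
		// splitting 'See [[Foo]] now' yields array( 'See ', '[[Foo]]', ' now' ).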
		$delimiter = $wgLinkTitlesParseHeadings ? '' : '=+.+?=+|';
		$urlPattern = '[a-z]+?\:\/\/(?:\S+\.)+\S+(?:\/.*)?';
		self::$delimiter = '/(' .                        // exclude from linking:
			'\[\[.*?\]\]|' .                               // links
			$delimiter .                                   // headings (if requested)
			$templatesDelimiter .                          // templates (if requested)
			'^ .+?\n|\n .+?\n|\n .+?$|^ .+?$|' .           // preformatted text
			'<nowiki>.*?<\/nowiki>|<code>.*?<\/code>|' .   // nowiki/code
			'<pre>.*?<\/pre>|<html>.*?<\/html>|' .         // pre/html
			'<script>.*?<\/script>|' .                     // script
			'<div.+?>|<\/div>|' .                          // attributes of div elements
			'<span.+?>|<\/span>|' .                        // attributes of span elements
			'<file>[^<]*<\/file>|' .                       // stuff inside file elements
			'style=".+?"|class=".+?"|' .                   // styles and classes (e.g. of wikitables)
			'\[' . $urlPattern . '\s.+?\]|' . $urlPattern . '(?=\s|$)|' . // urls
			'(?<=\b)\S+\@(?:\S+\.)+\S+(?=\b)' .            // email addresses
			')/ismS';
	}
}

// vim: ts=2:sw=2:noet:comments^=\:///

includes/LinkTitles_Magic.php (new file, 31 lines)
@@ -0,0 +1,31 @@
<?php
/*
 * Copyright 2012-2016 Daniel Kraus <bovender@bovender.de> ('bovender')
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */
/*! @file LinkTitles_Magic.php
 */

/// Holds the two magic words that the extension provides.
$magicWords = array();

/// Default magic words in English.
$magicWords['en'] = array(
	'MAG_LINKTITLES_NOAUTOLINKS' => array( 0, '__NOAUTOLINKS__' ),
	'MAG_LINKTITLES_NOTARGET' => array( 0, '__NOAUTOLINKTARGET__' )
);
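// Illustrative usage (not part of the original file): placing
// __NOAUTOLINKS__ anywhere on a page keeps LinkTitles from adding links to
// that page; __NOAUTOLINKTARGET__ keeps a page from being used as a link
// target (when $wgLinkTitlesEnableNoTargetMagicWord is true).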

includes/LinkTitles_Special.php (new file, 296 lines)
@@ -0,0 +1,296 @@
<?php
/*
 * Copyright 2012-2016 Daniel Kraus <bovender@bovender.de> ('bovender')
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */
namespace LinkTitles;

/// @defgroup batch Batch processing

/// @cond
if ( !defined( 'MEDIAWIKI' ) ) {
	die( 'Not an entry point.' );
}
/// @endcond

/// Provides a special page that can be used to batch-process all pages in
/// the wiki. By default, this can only be performed by sysops.
/// @ingroup batch
class Special extends \SpecialPage {

	/// Constructor. Announces the special page title and required user right
	/// to the parent constructor.
	function __construct() {
		// The second parameter in the following function call ensures that only
		// users who have the 'linktitles-batch' right get to see this page (by
		// default, these are all sysop users).
		parent::__construct( 'LinkTitles', 'linktitles-batch' );
	}

	function getGroupName() {
		return 'pagetools';
	}
	/// Entry function of the special page class. Will abort if the user does
	/// not have appropriate permissions ('linktitles-batch').
	/// @return undefined
	function execute( $par ) {
		// Prevent non-authorized users from executing the batch processing.
		if ( !$this->userCanExecute( $this->getUser() ) ) {
			$this->displayRestrictionError();
			return;
		}

		$request = $this->getRequest();
		$output = $this->getOutput();
		$this->setHeaders();

		// Determine whether this page was requested via GET or POST.
		// If GET, display information and a button to start linking.
		// If POST, start or continue the linking process.
		if ( $request->wasPosted() && array_key_exists( 's', $request->getValues() ) ) {
			$this->process( $request, $output );
		} else {
			$this->buildInfoPage( $request, $output );
		}
	}
	/// Processes wiki articles, starting at the page indicated by
	/// $startTitle. If $wgLinkTitlesTimeLimit is reached before all pages are
	/// processed, returns the title of the next page that needs processing.
	/// @param WebRequest $request WebRequest object that is associated with
	///                            the special page.
	/// @param OutputPage $output Output page for the special page.
	private function process( \WebRequest &$request, \OutputPage &$output ) {
		global $wgLinkTitlesTimeLimit;

		// Start the stopwatch.
		$startTime = microtime( true );

		// Connect to the database.
		$dbr = wfGetDB( DB_SLAVE );

		// Fetch the start index and the optional end index from the POST
		// request.
		$postValues = $request->getValues();

		// Convert the start index to an integer; this helps prevent
		// SQL injection attacks via forged POST requests.
		$start = intval( $postValues['s'] );

		// If an end index was given, we don't need to query the database.
		if ( array_key_exists( 'e', $postValues ) ) {
			$end = intval( $postValues['e'] );
		} else {
			// No end index was given. Therefore, count pages now.
			$end = $this->countPages( $dbr );
		};

		$reloads = array_key_exists( 'r', $postValues ) ? intval( $postValues['r'] ) : 0;

		// Retrieve page names from the database.
		$res = $dbr->select(
			'page',
			'page_title',
			array(
				'page_namespace = 0',
			),
			__METHOD__,
			array(
				'LIMIT' => 999999999,
				'OFFSET' => $start
			)
		);

		// Iterate through the pages; break if a time limit is exceeded.
		$curTitle = null;
		foreach ( $res as $row ) {
			$curTitle = $row->page_title;
			Extension::processPage( $curTitle, $this->getContext() );
			$start += 1;

			// Check if the time limit is exceeded.
			if ( microtime( true ) - $startTime > $wgLinkTitlesTimeLimit ) {
				break;
			}
		}

		$this->addProgressInfo( $output, $curTitle, $start, $end );

		// If we have not reached the last page yet, produce code to reload
		// the extension's special page.
		if ( $start < $end ) {
			$reloads += 1;
			// Build a form with hidden values and output JavaScript code that
			// immediately submits the form in order to continue the process.
			$output->addHTML( $this->getReloaderForm( $request->getRequestURL(),
				$start, $end, $reloads ) );
		} else {
			// Last page has been processed.
			$this->addCompletedInfo( $output, $start, $end, $reloads );
		}
	}
	/// Adds WikiText to the output containing information about the extension
	/// and a form and button to start linking.
	private function buildInfoPage( &$request, &$output ) {
		$url = $request->getRequestURL();

		// TODO: Put the page contents in messages in the i18n file.
		$output->addWikiText(
<<<EOF
LinkTitles extension: http://www.mediawiki.org/wiki/Extension:LinkTitles

Source code: http://github.com/bovender/LinkTitles

== Batch Linking ==
You can start a batch linking process by clicking on the button below.
This will go through every page in the normal namespace of your Wiki and
insert links automatically. This page will repeatedly reload itself, in
order to prevent blocking the server. To interrupt the process, simply
close this page.
EOF
		);
		$output->addHTML(
<<<EOF
<form method="post" action="${url}">
	<input type="submit" value="Start linking" />
	<input type="hidden" name="s" value="0" />
</form>
EOF
		);
	}
	/// Produces informative output in WikiText format to show while working.
	/// @param $output Output object.
	/// @param $curTitle Title of the currently processed page.
	/// @param $index Index of the currently processed page.
	/// @param $end Last index that will be processed (i.e., number of
	///             pages).
	private function addProgressInfo( &$output, $curTitle, $index, $end ) {
		$progress = $index / $end * 100;
		$percent = sprintf( "%01.1f", $progress );

		$output->addWikiText(
<<<EOF
== Processing pages... ==
The [http://www.mediawiki.org/wiki/Extension:LinkTitles LinkTitles]
extension is currently going through every page of your wiki, adding links to
existing pages as appropriate.

=== Current page: $curTitle ===
EOF
		);
		$output->addHTML(
<<<EOF
<p>Page ${index} of ${end}.</p>
<div style="width:100%; padding:2px; border:1px solid #000; position: relative;
		margin-bottom:16px;">
	<span style="position: absolute; left: 50%; font-weight:bold; color:#555;">
		${percent}%
	</span>
	<div style="width:${progress}%; background-color:#bbb; height:20px; margin:0;"></div>
</div>
EOF
		);
		$output->addWikiText(
<<<EOF
=== To abort, close this page, or hit the 'Stop' button in your browser ===
[[Special:LinkTitles|Return to Special:LinkTitles.]]
EOF
		);
	}
	/// Generates an HTML form and JavaScript to automatically submit the
	/// form.
	/// @param $url URL to reload with a POST request.
	/// @param $start Index of the next page that shall be processed.
	/// @param $end Index of the last page to be processed.
	/// @param $reloads Counter that holds the number of reloads so far.
	/// @returns String that holds the HTML for a form and a
	///          JavaScript command.
	private function getReloaderForm( $url, $start, $end, $reloads ) {
		return
<<<EOF
<form method="post" name="linktitles" action="${url}">
	<input type="hidden" name="s" value="${start}" />
	<input type="hidden" name="e" value="${end}" />
	<input type="hidden" name="r" value="${reloads}" />
</form>
<script type="text/javascript">
	document.linktitles.submit();
</script>
EOF
		;
	}
	/// Adds statistics to the page when all processing is done.
	/// @param $output Output object
	/// @param $start Index of the first page that was processed.
	/// @param $end Index of the last processed page.
	/// @param $reloads Number of reloads of the page.
	/// @returns undefined
	private function addCompletedInfo( &$output, $start, $end, $reloads ) {
		global $wgLinkTitlesTimeLimit;
		// Guard against division by zero when the batch finished without
		// a single reload.
		$pagesPerReload = $reloads > 0 ? sprintf( '%0.1f', $end / $reloads ) : $end;
		$output->addWikiText(
<<<EOF
== Batch processing completed! ==
{| class="wikitable"
|-
| total number of pages: || ${end}
|-
| timeout setting [s]: || ${wgLinkTitlesTimeLimit}
|-
| webpage reloads: || ${reloads}
|-
| pages scanned per reload interval: || ${pagesPerReload}
|}
EOF
		);
	}
	/// Counts the number of pages in a read-access wiki database ($dbr).
	/// @param $dbr Read-only `Database` object.
	/// @returns Number of pages in the default namespace (0) of the wiki.
	private function countPages( &$dbr ) {
		$res = $dbr->select(
			'page',
			'page_id',
			array(
				'page_namespace = 0',
			),
			__METHOD__
		);
		return $res->numRows();
	}
}

// vim: ts=2:sw=2:noet:comments^=\:///