<?php
/**
 * @file mod/parse_url.php
 * @brief The parse_url module
 *
 * This module parses a URL for embeddable content (audio, video, image
 * files or links) and formats this information as BBCode.
 *
 * @see ParseUrl::getSiteinfo() for more information about scraping embeddable content
 */

use Friendica\App;
use Friendica\Core\Addon;
use Friendica\Util\Network;
use Friendica\Util\ParseUrl;

require_once("include/items.php");

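// Illustrative request/response sketch (values assumed, not taken from the code):
// a call like /parse_url?url=https%3A%2F%2Fexample.com%2Fclip.mp4 would be answered
// with "[video]https://example.com/clip.mp4[/video]", while a regular HTML page gets
// a [bookmark] line or a BBCode attachment built from the page's metadata.
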
function parse_url_content(App $a) {

	$text = null;
	$title = "";
	$str_tags = "";
	$type = "";

	$br = "\n";

	if (x($_GET, "binurl")) {
		// "binurl" carries the URL hex encoded
		$url = trim(hex2bin($_GET["binurl"]));
	} else {
		$url = trim($_GET["url"]);
	}

	if (x($_GET, "title")) {
		$title = strip_tags(trim($_GET["title"]));
	}

	if (x($_GET, "description")) {
		$text = strip_tags(trim($_GET["description"]));
	}

	if (x($_GET, "tags")) {
		$arr_tags = ParseUrl::convertTagsToArray($_GET["tags"]);
		if (count($arr_tags)) {
			$str_tags = $br . implode(" ", $arr_tags) . $br;
		}
	}

	// Add the url scheme if it is missing
	$arrurl = parse_url($url);
	if (!x($arrurl, "scheme")) {
		if (x($arrurl, "host")) {
			$url = "http:" . $url;
		} else {
			$url = "http://" . $url;
		}
	}
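
	// Illustrative examples: a protocol-relative "//example.com/page" (host but
	// no scheme) becomes "http://example.com/page"; a bare "example.com/page"
	// (no host detected) gets "http://" prepended with the same result.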

	logger("parse_url: " . $url);

	// Check if the URL is an image, video or audio file. If so format
	// the URL with the corresponding BBCode media tag
	$redirects = 0;

	// Fetch the header of the URL
	$result = Network::curl($url, false, $redirects, ["novalidate" => true, "nobody" => true]);

	if ($result["success"]) {
		// Convert the header fields into an array
		$hdrs = [];
		$h = explode("\n", $result["header"]);
		foreach ($h as $l) {
			$header = array_map("trim", explode(":", trim($l), 2));
			if (count($header) == 2) {
				list($k, $v) = $header;
				$hdrs[$k] = $v;
			}
		}

		if (array_key_exists("Content-Type", $hdrs)) {
			$type = $hdrs["Content-Type"];
		}

		// Answer directly with a media tag for image, video and audio files
		if ($type) {
			if (stripos($type, "image/") !== false) {
				echo $br . "[img]" . $url . "[/img]" . $br;
				killme();
			}
			if (stripos($type, "video/") !== false) {
				echo $br . "[video]" . $url . "[/video]" . $br;
				killme();
			}
			if (stripos($type, "audio/") !== false) {
				echo $br . "[audio]" . $url . "[/audio]" . $br;
				killme();
			}
		}
	}
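
	// E.g. (illustrative) a header line "Content-Type: image/jpeg" ends up as
	// $hdrs["Content-Type"] = "image/jpeg" and the URL is answered with an [img] tag.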

	$template = "[bookmark=%s]%s[/bookmark]%s";

	$arr = ["url" => $url, "text" => ""];

	// Give addons the chance to deliver the content themselves
	Addon::callHooks("parse_link", $arr);

	if (strlen($arr["text"])) {
		echo $arr["text"];
		killme();
	}
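
	/*
	 * Illustrative "parse_link" addon hook sketch (hypothetical addon name and
	 * logic, shown only as a comment): an addon registered for this hook can
	 * fill $arr["text"] to short-circuit the parsing below, e.g.
	 *
	 *   function myaddon_parse_link(App $a, array &$arr) {
	 *       if (strpos($arr["url"], "example.com") !== false) {
	 *           $arr["text"] = "[bookmark=" . $arr["url"] . "]Example[/bookmark]";
	 *       }
	 *   }
	 */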

	// If there is already some content information submitted we don't
	// need to parse the url for content.
	if ($url && $title && $text) {

		$title = str_replace(["\r", "\n"], ["", ""], $title);

		$text = "[quote]" . trim($text) . "[/quote]" . $br;

		$result = sprintf($template, $url, ($title) ? $title : $url, $text) . $str_tags;

		logger("parse_url (unparsed): returns: " . $result);

		echo $result;
		killme();
	}

	// Fetch the information directly from the webpage
	$siteinfo = ParseUrl::getSiteinfo($url);

	unset($siteinfo["keywords"]);

	// Format it as a BBCode attachment
	$info = add_page_info_data($siteinfo);

	echo $info;

	killme();
}

/**
 * @brief Legacy function to call ParseUrl::getSiteinfoCached
 *
 * Note: We have moved the function to ParseUrl.php. This function is only for
 * legacy support and will be removed in the future.
 *
 * @param string $url         The URL of the page which should be scraped
 * @param bool   $no_guessing If true, the parser doesn't search for
 *                            preview pictures
 * @param bool   $do_oembed   The false option is used by the function fetch_oembed()
 *                            to avoid endless loops
 *
 * @return array which contains the needed data for embedding
 *
 * @see ParseUrl::getSiteinfoCached()
 *
 * @todo Remove this function after all addons have been changed to use
 *   ParseUrl::getSiteinfoCached
 */
function parseurl_getsiteinfo_cached($url, $no_guessing = false, $do_oembed = true) {
	$siteinfo = ParseUrl::getSiteinfoCached($url, $no_guessing, $do_oembed);
	return $siteinfo;
}
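
// Illustrative usage of the legacy wrapper (hypothetical URL): the call
//   $siteinfo = parseurl_getsiteinfo_cached("https://example.com/", true);
// simply forwards to
//   $siteinfo = ParseUrl::getSiteinfoCached("https://example.com/", true, true);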