Usage

rm_url(text.var, trim = !extract, clean = TRUE, pattern = "@rm_url",
  replacement = "", extract = FALSE,
  dictionary = getOption("regex.library"), ...)

rm_twitter_url(text.var, trim = !extract, clean = TRUE,
  pattern = "@rm_twitter_url", replacement = "", extract = FALSE,
  dictionary = getOption("regex.library"), ...)
Arguments

text.var     The text variable.

trim         logical.  If TRUE removes leading and trailing white spaces.

clean        trim and clean.  If TRUE extra white spaces and escaped
             characters will be removed.

pattern      A character string containing a regular expression (or character
             string for fixed = TRUE) to be matched in the given character
             vector.  Default, "@rm_url" uses the rm_url regex from the
             regular expression dictionary from the dictionary argument.

replacement  Replacement for matched pattern.

extract      logical.  If TRUE the URLs are extracted into a list of vectors.

dictionary   A dictionary of canned regular expressions to search within if
             pattern begins with "@rm_".

...          Other arguments passed to gsub.
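The interaction of trim and clean is easiest to see on a short string; the sketch below illustrates the assumed behaviour (the first output is approximate and not taken from the original examples):

x <- " I like www.talkstats.com and http://stackoverflow.com"

## With trimming and cleaning turned off, the white space that surrounded the
## removed URLs is left in place (roughly " I like  and ").
rm_url(x, trim = FALSE, clean = FALSE)

## With the defaults (trim = TRUE, clean = TRUE) the result is tidied up:
## [1] "I like and"
rm_url(x)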
Description

rm_url - Remove/replace/extract URLs from a string.

rm_twitter_url - Remove/replace/extract Twitter Short URLs from a string.
Details

The default regex pattern "(http[^ ]*)|(www\.[^ ]*)" is more liberal. More
constrained versions can be accessed via pattern = "@rm_url2" &
pattern = "@rm_url3" (see Examples).
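To compare the liberal default against the more constrained variants, the canned patterns can be pulled out of the regular expression dictionary. This is a minimal sketch assuming qdapRegex's grab() helper and the default dictionary:

library(qdapRegex)

## Look up the canned regular expressions by their "@" names.
grab("@rm_url")     ## the liberal default used by rm_url
grab("@rm_url2")    ## a more constrained variant
grab("@rm_url3")    ## another, still more constrained variant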
References

The more constrained URL regular expressions ("@rm_url2" and "@rm_url3") were
adapted from imme_emosol's response: https://mathiasbynens.be/demo/url-regex
x <- " I like www.talkstats.com and http://stackoverflow.com" rm_url(x)[1] "I like and"rm_url(x, replacement = '<a href="\\1" target="_blank">\\1</a>')[1] "I like <a href=\"\" target=\"_blank\"></a> and <a href=\"http://stackoverflow.com\" target=\"_blank\">http://stackoverflow.com</a>"rm_url(x, extract=TRUE)[[1]] [1] "www.talkstats.com" "http://stackoverflow.com"rm_url(x, pattern = "@rm_url2", extract=TRUE)[[1]] [1] "www.talkstats.com" "http://stackoverflow.com"rm_url(x, pattern = "@rm_url3", extract=TRUE)[[1]] [1] "http://stackoverflow.com"## Remove Twitter Short URL x <- c("download file from http://example.com", "this is the link to my website http://example.com", "go to http://example.com from more info.", "Another url ftp://www.example.com", "And https://www.example.net", "twitter type: t.co/N1kq0F26tG", "still another one https://t.co/N1kq0F26tG :-)") rm_twitter_url(x)[1] "download file from http://example.com" "this is the link to my website http://example.com" [3] "go to http://example.com from more info." "Another url ftp://www.example.com" [5] "And https://www.example.net" "twitter type:" [7] "still another one :-)"rm_twitter_url(x, extract=TRUE)[[1]] [1] NA [[2]] [1] NA [[3]] [1] NA [[4]] [1] NA [[5]] [1] NA [[6]] [1] "t.co/N1kq0F26tG" [[7]] [1] "https://t.co/N1kq0F26tG"## Combine removing Twitter URLs and standard URLs rm_twitter_n_url <- rm_(pattern=pastex("@rm_twitter_url", "@rm_url")) rm_twitter_n_url(x)[1] "download file from" "this is the link to my website" "go to from more info." [4] "Another url" "And" "twitter type:" [7] "still another one :-)"rm_twitter_n_url(x, extract=TRUE)[[1]] [1] "http://example.com" [[2]] [1] "http://example.com" [[3]] [1] "http://example.com" [[4]] [1] "ftp://www.example.com" [[5]] [1] "https://www.example.net" [[6]] [1] "t.co/N1kq0F26tG" [[7]] [1] "https://t.co/N1kq0F26tG"
See Also

gsub, stri_extract_all_regex

Other rm_.functions: as_numeric, as_numeric2, rm_number; as_time, as_time2,
rm_time, rm_transcript_time; rm_abbreviation; rm_angle, rm_bracket,
rm_bracket_multiple, rm_curly, rm_round, rm_square; rm_between,
rm_between_multiple; rm_caps_phrase; rm_caps; rm_citation_tex; rm_citation;
rm_city_state_zip; rm_city_state; rm_date; rm_default; rm_dollar; rm_email;
rm_emoticon; rm_endmark; rm_hash; rm_nchar_words; rm_non_ascii; rm_non_words;
rm_percent; rm_phone; rm_postal_code; rm_repeated_characters;
rm_repeated_phrases; rm_repeated_words; rm_tag; rm_title_name; rm_white,
rm_white_bracket, rm_white_colon, rm_white_comma, rm_white_endmark,
rm_white_lead, rm_white_lead_trail, rm_white_multiple, rm_white_punctuation,
rm_white_trail; rm_zip