Replace the first N dots of a string revisited - regex

In January I asked how to replace the first N dots of a string (earlier question: "replace the first N dots of a string").
DWin's answer was very helpful. Can it be generalized?
df.1 <- read.table(text = '
my.string other.stuff
1111111111111111 120
..............11 220
11.............. 320
1............... 320
.......1........ 420
................ 820
11111111111111.1 120
', header = TRUE)
nn <- 14
# this works:
df.1$my.string <- sub("^\\.{14}", paste(as.character(rep(0, nn)), collapse = ""),
df.1$my.string)
# this does not work:
df.1$my.string <- sub("^\\.{nn}", paste(as.character(rep(0, nn)), collapse = ""),
df.1$my.string)

Using sprintf you can have the desired output
nn <- 3
sub(sprintf("^\\.{%s}", nn),
paste(rep(0, nn), collapse = ""), df.1$my.string)
## [1] "1111111111111111" "000...........11" "11.............."
## [4] "1..............." "000....1........" "000............."
## [7] "11111111111111.1"

pattstr <- paste0("\\.", paste0( rep(".",nn), collapse="") )
pattstr
#[1] "\\..............."
df.1$my.string <- sub(pattstr,
paste0( rep("0", nn), collapse=""),
df.1$my.string)
> df.1
my.string other.stuff
1 1111111111111111 120
2 000000000000001 220
3 11.............. 320
4 100000000000000 320
5 00000000000000. 420
6 00000000000000. 820
7 11111111111111.1 120

Related

R setdiff() by regex

Is it possible to customize setdiff using regular expressions to see what is in one vector and not another? For example:
x <- c("1\t119\t120\t1\t119\t120\tABC\tDEF\t0", "2\t558\t559\t2\t558\t559\tGHI\tJKL\t0", "3\t139\t141\t3\t139\t141\tMNO\tPQR\t0", "3\t139\t143\t3\t139\t143\tSTU\tVWX\t0")
[1] "1\t119\t120\t1\t119\t120\tABC\tDEF\t0"
[2] "2\t558\t559\t2\t558\t559\tGHI\tJKL\t0"
[3] "3\t139\t141\t3\t139\t141\tMNO\tPQR\t0"
[4] "3\t139\t143\t3\t139\t143\tSTU\tVWX\t0"
y <- c("1\t119\t120\t1\t109\t120\tABC\tDEF\t0", "2\t558\t559\t2\t548\t559\tGHI\tJKL\t0", "3\t139\t141\t3\t129\t141\tMNO\tPQR\t0", "3\t139\t143\t3\t129\t143\tSTU\tVWX\t0", "4\t157\t158\t4\t147\t158\tXWX\tYTY\t0", "5\t158\t159\t5\t148\t159\tPHP\tWZW\t0")
[1] "1\t119\t120\t1\t109\t120\tABC\tDEF\t0"
[2] "2\t558\t559\t2\t548\t559\tGHI\tJKL\t0"
[3] "3\t139\t141\t3\t129\t141\tMNO\tPQR\t0"
[4] "3\t139\t143\t3\t129\t143\tSTU\tVWX\t0"
[5] "4\t157\t158\t4\t147\t158\tXWX\tYTY\t0"
[6] "5\t158\t159\t5\t148\t159\tPHP\tWZW\t0"
I want to be able to show that:
[5] "4\t157\t158\t4\t147\t158\tXWX\tYTY\t0"
[6] "5\t158\t159\t5\t148\t159\tPHP\tWZW\t0"
are new because 4\t157\t158 and 5\t158\t159 are unique to y. This doesn't work:
> setdiff(y,x)
[1] "1\t119\t120\t1\t109\t120\tABC\tDEF\t0" "2\t558\t559\t2\t548\t559\tGHI\tJKL\t0"
[3] "3\t139\t141\t3\t129\t141\tMNO\tPQR\t0" "3\t139\t143\t3\t129\t143\tSTU\tVWX\t0"
[5] "4\t157\t158\t4\t147\t158\tXWX\tYTY\t0" "5\t158\t159\t5\t148\t159\tPHP\tWZW\t0"
Because column 5 is clearly different in both x and y. I want to setdiff only based on the first three columns.
A simple example of setdiff can be found here: How to tell what is in one vector and not another?
One way to do this is to put x and y as data.frames and anti-join. I'll use data.table since I find it more natural.
library(data.table)
xDT <- as.data.table(do.call("rbind", strsplit(x, split = "\t")))
yDT <- as.data.table(do.call("rbind", strsplit(y, split = "\t")))
Now anti-join (a "setdiff" for data.frames/data.tables):
yDT[!xDT, on = paste0("V", 1:3)]
# V1 V2 V3 V4 V5 V6 V7 V8 V9
# 1: 4 157 158 4 147 158 XWX YTY 0
# 2: 5 158 159 5 148 159 PHP WZW 0
You could also get the row index (thanks to #Frank for the suggested improvement/simplification):
> yDT[!xDT, which = TRUE, on = paste0("V", 1:3)]
Or extract it directly from y:
> y[yDT[!xDT, which = TRUE, on = paste0("V", 1:3)]]
# [1] "4\t157\t158\t4\t147\t158\tXWX\tYTY\t0" "5\t158\t159\t5\t148\t159\tPHP\tWZW\t0"
We could also use anti_join from dplyr after reading it with either fread
library(data.table)
library(dplyr)
anti_join(fread(paste(y, collapse='\n')),
fread(paste(x, collapse='\n')), by = c('V1', 'V2', 'V3'))
# V1 V2 V3 V4 V5 V6 V7 V8 V9
# (int) (int) (int) (int) (int) (int) (chr) (chr) (int)
# 1 4 157 158 4 147 158 XWX YTY 0
# 2 5 158 159 5 148 159 PHP WZW 0
Or (as the title requests for regex) we can use regex to remove part of the string and then do the %in%
y[!sub('(([^\t]+\t){3}).*', '\\1', y) %in%
sub('(([^\t]+\t){3}).*', '\\1', x)]
#[1] "4\t157\t158\t4\t147\t158\tXWX\tYTY\t0" "5\t158\t159\t5\t148\t159\tPHP\tWZW\t0"

How can I convert Degree minute sec to Decimal in R?

I have this dataframe:
Lat Long
59 44 50 151 45 11
59 49 28 154 52 56
59 46 42 150 45 15
How can I convert this into decimal columns?
lat is in dd mm ss and long is in ddd mm ss
I found a similar solution here, but couldn't adapt the regex for my case.
Converting geo coordinates from degree to decimal
Try this function:
angle2dec <- function(angle) {
  # Convert angles given as "degrees minutes seconds" strings
  # (e.g. "59 44 50") to decimal degrees.
  #
  # angle: a character vector (or factor) of space-separated D M S
  #        triplets. Runs of whitespace and leading/trailing blanks
  #        are tolerated (a generalization over splitting on a single
  #        space, which produced empty tokens and NA results).
  # Returns: a numeric vector the same length as `angle`.
  #
  # NOTE(review): assumes non-negative degrees; a leading "-" on the
  # degree field would not propagate to the minutes/seconds terms.
  parts <- do.call(rbind, strsplit(trimws(as.character(angle)), "\\s+"))
  # Coerce the character matrix to numeric in place, keeping its shape.
  storage.mode(parts) <- "numeric"
  # Vectorized over all rows at once instead of a per-row apply().
  parts[, 1] + parts[, 2] / 60 + parts[, 3] / 3600
}
Then you can apply it to each column in your data frame:
new_df <- apply(df, 2L, angle2dec)
new_df
Lat Long
[1,] 59.74722 151.7531
[2,] 59.82444 154.8822
[3,] 59.77833 150.7542
or just
df$Lat <- angle2dec(df$Lat)
df$Long <- angle2dec(df$Long)
May I suggest the tidyr approach:
df <- data.frame( Lat=c("59 44 50","59 49 28","59 46 42"),
Long=c("151 45 11","154 52 56","150 45 15"))
library(tidyr); library(dplyr)
df %>%
separate(Lat, paste("lat",c("d","m","s"), sep="_") ) %>%
separate(Long, paste("long",c("d","m","s"), sep="_" ) ) %>%
mutate_each(funs(as.numeric)) %>%
transmute(lat_dec=lat_d + lat_m/60 + lat_s/60^2,
long_dec=long_d + long_m/60 + long_s/60^2)
# lat_dec long_dec
# 1 59.74722 151.7531
# 2 59.82444 154.8822
# 3 59.77833 150.7542
Here's an idea using splitstackshape:
library(dplyr)
library(splitstackshape)
df %>%
cSplit(c("Lat", "Long"), sep = " ") %>%
transmute(Lat = Lat_1 + Lat_2 / 60 + Lat_3 / 60^2,
Long = Long_1 + Long_2 / 60 + Long_3 / 60^2)
Which gives:
# Lat Long
#1: 59.74722 151.7531
#2: 59.82444 154.8822
#3: 59.77833 150.7542

Rearranging the structure of many txt files and then merging them in one data frame

I would appreciate your help with this a lot!
I have ~4.5k txt files which look like this:
Simple statistics using MSPA parameters: 8_3_1_1 on input file: 20130815 104359 875 000000 0528 0548_result.tif
MSPA-class [color]: Foreground/data pixels [%] Frequency
============================================================
CORE(s) [green]: -- 0
CORE(m) [green]: 48.43/13.45 1
CORE(l) [green]: -- 0
ISLET [brown]: 3.70/ 1.03 20
PERFORATION [blue]: 0.00/ 0.00 0
EDGE [black]: 30.93/ 8.59 11
LOOP [yellow]: 9.66/ 2.68 6
BRIDGE [red]: 0.00/ 0.00 0
BRANCH [orange]: 7.28/ 2.02 40
Background [grey]: --- /72.22 11
Missing [white]: 0.00 0
I want to read all txt files from a directory into R and then perform a rearranging task on them before merging them together.
The values in the txt files can change, so in places where there is a 0.00 now, could be a relevant number in some files (so we need those). For the fields where there are -- now, it would be good if the script could test if there are -- , or a number. If there are the --, then it should turn them into NAs. On the other hand, real 0.00 values are of value and I need them. There is only one value for the Missing white column (or row here), that value should then be copied into both columns, foreground% and data pixels%.
The general rearranging which I need is to make all the data available as columns with only 1 row per txt file. For every row of data in the txt file here, there should be 3 columns in the output file (foreground%, data pixel% and frequency for every color). The name of the row should be the image name which is mentioned in the beginning of the file, here: 20130815 104359 875 000000 0528 0548
The rest can be omitted.
The output should look something like this:
I am working on this simultaneously but am not sure which direction to take. So any help is more than welcome!
Best,
Moritz
This puts it in the format you want, I think, but the example doesn't match your image so I can't be sure:
(lf <- list.files('~/desktop', pattern = '^image\\d+.txt', full.names = TRUE))
# [1] "/Users/rawr/desktop/image001.txt" "/Users/rawr/desktop/image002.txt"
# [3] "/Users/rawr/desktop/image003.txt"
## Read every MSPA summary file in `lf` and return a list with one
## single-row data.frame per file: the image name plus one column per
## MSPA class, holding the raw "foreground/data pixels" field
## ("--" placeholders become NA).
lapply(lf, function(xx) {
rl <- readLines(con <- file(xx), warn = FALSE)
close(con)
## assuming the file name is after "file: " until the end of the string
## and ends in .tif
img_name <- gsub('.*file:\\s+(.*).tif', '\\1', rl[1])
## removes each string up to and including the ===== string
rl <- rl[-(1:grep('==', rl))]
## remove leading whitespace
rl <- gsub('^\\s+', '', rl)
## split the remaining lines by larger chunks of whitespace
## (report columns are separated by runs of 2+ spaces)
mat <- do.call('rbind', strsplit(rl, '\\s{2, }'))
## more cleaning, setting attributes, etc
## "--" means "no value" in the report; represent it as NA
mat[mat == '--'] <- NA
## build the single row: transpose the value column and label each
## entry with its class name from column 1
mat <- cbind(image_name = img_name, `colnames<-`(t(mat[, 2]), mat[, 1]))
as.data.frame(mat)
})
I created three files using your example and made each one slightly different to show how this would work on a directory with several files:
# [[1]]
# image_name CORE(s) [green]: CORE(m) [green]: CORE(l) [green]: ISLET [brown]: PERFORATION [blue]: EDGE [black]: LOOP [yellow]: BRIDGE [red]: BRANCH [orange]: Background [grey]: Missing [white]:
# 1 20130815 104359 875 000000 0528 0548_result <NA> 48.43/13.45 <NA> 3.70/ 1.03 0.00/ 0.00 30.93/ 8.59 9.66/ 2.68 0.00/ 0.00 7.28/ 2.02 --- /72.22 0.00
#
# [[2]]
# image_name CORE(s) [green]: CORE(m) [green]: CORE(l) [green]: ISLET [brown]: PERFORATION [blue]: EDGE [black]: LOOP [yellow]: BRIDGE [red]: BRANCH [orange]: Background [grey]: Missing [white]:
# 1 20139341 104359 875 000000 0528 0548_result 23 48.43/13.45 23 <NA> 0.00/ 0.00 30.93/ 8.59 9.66/ 2.68 0.00/ 0.00 7.28/ 2.02 --- /72.22 0.00
#
# [[3]]
# image_name CORE(s) [green]: CORE(m) [green]: CORE(l) [green]: ISLET [brown]: PERFORATION [blue]: EDGE [black]: LOOP [yellow]: BRIDGE [red]: BRANCH [orange]: Background [grey]: Missing [white]:
# 1 20132343 104359 875 000000 0528 0548_result <NA> <NA> <NA> <NA> <NA> 30.93/ 8.59 9.66/ 2.68 0.00/ 0.00 7.28/ 2.02 <NA> 0.00
EDIT
made a few changes to extract all the info:
(lf <- list.files('~/desktop', pattern = '^image\\d+.txt', full.names = TRUE))
# [1] "/Users/rawr/desktop/image001.txt" "/Users/rawr/desktop/image002.txt"
# [3] "/Users/rawr/desktop/image003.txt"
## Like the first version, but additionally splits the
## "foreground/data pixels" field into two columns and reshapes to one
## wide row per image (3 columns per MSPA class).
res <- lapply(lf, function(xx) {
rl <- readLines(con <- file(xx), warn = FALSE)
close(con)
## image name: text after "file: " up to ".tif" on the first line
img_name <- gsub('.*file:\\s+(.*).tif', '\\1', rl[1])
## drop the header, up to and including the "====" separator line
rl <- rl[-(1:grep('==', rl))]
## strip leading whitespace
rl <- gsub('^\\s+', '', rl)
## report columns are separated by runs of 2+ spaces
mat <- do.call('rbind', strsplit(rl, '\\s{2, }'))
dat <- as.data.frame(mat, stringsAsFactors = FALSE)
## split "fg/data" (V2) on "/", "-" or whitespace; "--" entries split
## into empty strings, which are turned into NA further down
tmp <- `colnames<-`(do.call('rbind', strsplit(dat$V2, '[-\\/\\s]+', perl = TRUE)),
c('Foreground','Data pixels'))
dat <- cbind(dat[, -2], tmp, image_name = img_name)
dat[] <- lapply(dat, as.character)
dat[dat == ''] <- NA
names(dat)[1:2] <- c('MSPA-class','Frequency')
## long -> wide: one row per image_name, one column per class/value pair
zzz <- reshape(dat, direction = 'wide', idvar = 'image_name', timevar = 'MSPA-class')
## rename "Value.CLASS [color]:" style names to "CLASS_Value"
names(zzz)[-1] <- gsub('(.*)\\.(.*) (?:.*)', '\\2_\\1', names(zzz)[-1], perl = TRUE)
zzz
})
here is the result (I just transformed into a long matrix so it would be easier to read. the real results are in a very wide data frame, one for each file):
`rownames<-`(matrix(res[[1]]), names(res[[1]]))
# [,1]
# image_name "20130815 104359 875 000000 0528 0548_result"
# CORE(s)_Frequency "0"
# CORE(s)_Foreground "NA"
# CORE(s)_Data pixels "NA"
# CORE(m)_Frequency "1"
# CORE(m)_Foreground "48.43"
# CORE(m)_Data pixels "13.45"
# CORE(l)_Frequency "0"
# CORE(l)_Foreground "NA"
# CORE(l)_Data pixels "NA"
# ISLET_Frequency "20"
# ISLET_Foreground "3.70"
# ISLET_Data pixels "1.03"
# PERFORATION_Frequency "0"
# PERFORATION_Foreground "0.00"
# PERFORATION_Data pixels "0.00"
# EDGE_Frequency "11"
# EDGE_Foreground "30.93"
# EDGE_Data pixels "8.59"
# LOOP_Frequency "6"
# LOOP_Foreground "9.66"
# LOOP_Data pixels "2.68"
# BRIDGE_Frequency "0"
# BRIDGE_Foreground "0.00"
# BRIDGE_Data pixels "0.00"
# BRANCH_Frequency "40"
# BRANCH_Foreground "7.28"
# BRANCH_Data pixels "2.02"
# Background_Frequency "11"
# Background_Foreground "NA"
# Background_Data pixels "72.22"
# Missing_Frequency "0"
# Missing_Foreground "0.00"
# Missing_Data pixels "0.00"
with your sample data:
lf <- list.files('~/desktop/data', pattern = '.txt', full.names = TRUE)
`rownames<-`(matrix(res[[1]]), names(res[[1]]))
# [,1]
# image_name "20130815 103704 780 000000 0372 0616"
# CORE(s)_Frequency "0"
# CORE(s)_Foreground "NA"
# CORE(s)_Data pixels "NA"
# CORE(m)_Frequency "1"
# CORE(m)_Foreground "54.18"
# CORE(m)_Data pixels "15.16"
# CORE(l)_Frequency "0"
# CORE(l)_Foreground "NA"
# CORE(l)_Data pixels "NA"
# ISLET_Frequency "11"
# ISLET_Foreground "3.14"
# ISLET_Data pixels "0.88"
# PERFORATION_Frequency "0"
# PERFORATION_Foreground "0.00"
# PERFORATION_Data pixels "0.00"
# EDGE_Frequency "1"
# EDGE_Foreground "34.82"
# EDGE_Data pixels "9.75"
# LOOP_Frequency "1"
# LOOP_Foreground "4.96"
# LOOP_Data pixels "1.39"
# BRIDGE_Frequency "0"
# BRIDGE_Foreground "0.00"
# BRIDGE_Data pixels "0.00"
# BRANCH_Frequency "20"
# BRANCH_Foreground "2.89"
# BRANCH_Data pixels "0.81"
# Background_Frequency "1"
# Background_Foreground "NA"
# Background_Data pixels "72.01"
# Missing_Frequency "0"
# Missing_Foreground "0.00"
# Missing_Data pixels "0.00"
I copied and pasted your data on a text file and adjusted the space in order to have consistency between them. You might want to do it or if you can attach a text file, it would be easy to work with. You may use pastebin - http://en.wikipedia.org/wiki/Pastebin
First set your working directory as follows:
setwd("path of your file")
#EDIT: Create a single data frame of all files
## Parse one data row of an MSPA report into a flat character vector:
## c(class-name tokens, foreground %, data-pixel %, frequency).
## NOTE(review): relies on the exact spacing of the report lines;
## confirm against the real files before reuse.
split.row.data <- function(x){
## drop the leading run of spaces
a1 = sub("( )+(.*)", '\\2', x)
## value fields: tokens after the ":", split on single spaces
b1 = unlist(strsplit(sub("( )+(.*)", '\\2', (strsplit(a1, ":"))[[1]][2]), " "))
## first value field is "foreground/data-pixels"; split on "/"
c1 = unlist(strsplit(b1[1], "/"))
if(length(c1) == 1){
## only one number present: use the position of the empty token to
## decide which side (foreground vs data-pixels) to pad with NA
if(which(b1[1:2] %in% "") == 1){
c1 = c(NA, c1)
}else if(which(b1[1:2] %in% "") == 2){
c1 = c(c1, NA)
}
}
## "--" placeholders mean missing
c1[which(c1 %in% c("--", "--- "))] <- NA
## class-name tokens, the two percentages, then the trailing frequency
return(c(unlist(strsplit(strsplit(a1, ":")[[1]][1], " ")),
c1,
b1[length(b1)]))
}
## Build one long data.frame (df2) from every image file: one row per
## MSPA class per file, with columns filename / class / color / values.
df2 <- data.frame(matrix(nrow = 1, ncol = 6), stringsAsFactors = FALSE)
file_list = list.files('~/desktop', pattern = '^image\\d+.txt', full.names = TRUE)
for (infile in file_list){
file_data <- readLines(con <- file(infile))
close(con)
## image name sits between "input file:" and ".tif" on line 3
filename = sub("(.*)(input file:)(.*)(.tif)", "\\3", file_data[3])
## data rows start at line 7, after the fixed-size header block
a2 <- file_data[7:length(file_data)]
d1 = lapply(a2, function(x) split.row.data(x))
df1 <- data.frame(matrix(nrow= length(d1), ncol = 5), stringsAsFactors = FALSE)
for(i in 1:length(d1)){df1[i, ] <- d1[[i]]}
## prepend the filename column
df1 <- cbind(data.frame(rep(filename, nrow(df1)), stringsAsFactors = FALSE),
df1)
colnames(df1) <- colnames(df2)
df2 <- rbind(df2, df1)
}
## drop the all-NA seed row used to initialise df2
df2 <- df2[2:nrow(df2), ]
df2[,4] <- as.numeric(df2[,4])
df2[,5] <- as.numeric(df2[,5])
df2[,6] <- as.numeric(df2[,6])
## strip "[" and "]" from the color column
e1 = unlist(lapply(df2[,3], function(x) gsub(']', '', x)))
df2[,3] = unlist(lapply(e1, function(x) gsub("[[]", '', x)))
## derive final column names from the header line of the last file read
header_names <- unlist(lapply(strsplit(file_data[5], "/"), function(x) strsplit(x, " ")))
colnames(df2) <- c("filename",
strsplit(header_names[1], " ")[[1]][2],
"color",
header_names[2:length(header_names)])
row.names(df2) <- 1:nrow(df2)
output:
print(head(df2))
filename MSPA-class color Foreground data pixels [%] Frequency
1 20130815 103739 599 000000 0944 0788 CORE(s) green NA NA 0
2 20130815 103739 599 000000 0944 0788 CORE(m) green 63.46 17.41 1
3 20130815 103739 599 000000 0944 0788 CORE(l) green NA NA 0
4 20130815 103739 599 000000 0944 0788 ISLET brown 0.00 0.00 0
5 20130815 103739 599 000000 0944 0788 PERFORATION blue 0.00 0.00 0
6 20130815 103739 599 000000 0944 0788 EDGE black 35.00 9.60 1
#get data for only "background" from "MSPA-class" column
df2_background <- df2[which(df2[, "MSPA-class"] %in% "Background"), ]
print(df2_background)
filename MSPA-class color Foreground data pixels [%] Frequency
11 20130815 103739 599 000000 0944 0788 Background grey NA 72.57 1
22 20130815 143233 712 000000 1048 0520 Background grey NA 77.51 1
33 20130902 163929 019 000000 0394 0290 Background grey NA 54.55 6

Splitting column of a data.frame in R using gsub

I have a data.frame called rbp that contains a single column like following:
>rbp
V1
dd_smadV1_39992_0_1
Protein: AGBT(Dm)
Sequence Position
234
290
567
126
Protein: ATF1(Dm)
Sequence Position
534
890
105
34
128
301
Protein: Pox(Dm)
201
875
453
*********************
dd_smadv1_9_02
Protein: foxc2(Mm)
Sequence Position
145
987
345
907
Protein: Lor(Hs)
876
512
I would like to discard the Sequence position and extract only the specific details like the names of the sequence and the corresponding protein names like following:
dd_smadV1_39992_0_1 AGBT(Dm);ATF1(Dm);Pox(Dm)
dd_smadv1_9_02 foxc2(Mm);Lor(Hs)
I tried the following code in R but it failed:
library(gsubfn)
Sub(rbp$V1,"Protein:(.*?) ")
Could anyone guide me please.
Here's one way to to it:
m <- gregexpr("Protein: (.*?)\n", x <- strsplit(paste(rbp$V1, collapse = "\n"), "*********************", fixed = TRUE)[[1]])
proteins <- lapply(regmatches(x, m), function(x) sub("Protein: (.*)\n", "\\1", x))
names <- sub(".*?([A-z0-9_]+)\n.*", "\\1", x)
sprintf("%s %s", names, sapply(proteins, paste, collapse = ";"))
# [1] "dd_smadV1_39992_0_1 AGBT(Dm);ATF1(Dm);Pox(Dm)"
# [2] "dd_smadv1_9_02 foxc2(Mm);Lor(Hs)"

Find the location of a character in string

I would like to find the location of a character in a string.
Say: string = "the2quickbrownfoxeswere2tired"
I would like the function to return 4 and 24 -- the character location of the 2s in string.
You can use gregexpr
gregexpr(pattern ='2',"the2quickbrownfoxeswere2tired")
[[1]]
[1] 4 24
attr(,"match.length")
[1] 1 1
attr(,"useBytes")
[1] TRUE
or perhaps str_locate_all from package stringr, which (as of stringr version 1.0) is a wrapper for stringi::stri_locate_all
library(stringr)
str_locate_all(pattern ='2', "the2quickbrownfoxeswere2tired")
[[1]]
start end
[1,] 4 4
[2,] 24 24
note that you could simply use stringi
library(stringi)
stri_locate_all(pattern = '2', "the2quickbrownfoxeswere2tired", fixed = TRUE)
Another option in base R would be something like
lapply(strsplit(x, ''), function(x) which(x == '2'))
should work (given a character vector x)
Here's another straightforward alternative.
> which(strsplit(string, "")[[1]]=="2")
[1] 4 24
You can make the output just 4 and 24 using unlist:
unlist(gregexpr(pattern ='2',"the2quickbrownfoxeswere2tired"))
[1] 4 24
Find the position of the nth occurrence of str2 in str1 (same order of parameters as Oracle SQL INSTR); returns 0 if not found
instr <- function(str1, str2, startpos = 1, n = 1) {
  # Position of the n-th occurrence of `str2` within `str1`, searching
  # from `startpos` (same argument order as Oracle SQL's INSTR).
  # Returns 0 when there are fewer than n occurrences.
  #
  # Fixes two defects of the strsplit-based version:
  #  * `str2` is now matched literally (fixed = TRUE), not as a regex,
  #    so patterns containing metacharacters like "." work;
  #  * occurrences adjacent to the end of the string are no longer
  #    missed (strsplit drops a trailing empty piece, so e.g. the 2nd
  #    "ab" in "abab" was previously reported as absent).
  hits <- gregexpr(str2, substring(str1, startpos), fixed = TRUE)[[1]]
  if (hits[1] == -1L || length(hits) < n) {
    return(0)
  }
  # gregexpr positions are relative to the substring; shift back to
  # positions in the original string.
  as.numeric(hits[n] + startpos - 1)
}
instr('xxabcdefabdddfabx','ab')
[1] 3
instr('xxabcdefabdddfabx','ab',1,3)
[1] 15
instr('xxabcdefabdddfabx','xx',2,1)
[1] 0
To only find the first locations, use lapply() with min():
my_string <- c("test1", "test1test1", "test1test1test1")
unlist(lapply(gregexpr(pattern = '1', my_string), min))
#> [1] 5 5 5
# or the readable tidyverse form
my_string %>%
gregexpr(pattern = '1') %>%
lapply(min) %>%
unlist()
#> [1] 5 5 5
To only find the last locations, use lapply() with max():
unlist(lapply(gregexpr(pattern = '1', my_string), max))
#> [1] 5 10 15
# or the readable tidyverse form
my_string %>%
gregexpr(pattern = '1') %>%
lapply(max) %>%
unlist()
#> [1] 5 10 15
You could use grep as well:
grep('2', strsplit(string, '')[[1]])
#4 24