GLSL: pow vs multiplication for integer exponent

Which is faster in GLSL:
pow(x, 3.0f);
or
x*x*x;
?
Does exponentiation performance depend on hardware vendor or exponent value?

I wrote a small benchmark, because I was interested in the results.
In my personal case, I was most interested in exponent = 5.
Benchmark code (running in Rem's Studio / LWJGL):
package me.anno.utils.bench

import me.anno.gpu.GFX
import me.anno.gpu.GFX.flat01
import me.anno.gpu.RenderState
import me.anno.gpu.RenderState.useFrame
import me.anno.gpu.framebuffer.Frame
import me.anno.gpu.framebuffer.Framebuffer
import me.anno.gpu.hidden.HiddenOpenGLContext
import me.anno.gpu.shader.Renderer
import me.anno.gpu.shader.Shader
import me.anno.utils.types.Floats.f2
import org.lwjgl.opengl.GL11.*
import java.nio.ByteBuffer
import kotlin.math.roundToInt

fun main() {

    fun createShader(code: String) = Shader(
        "", null, "" +
                "attribute vec2 attr0;\n" +
                "void main(){\n" +
                "   gl_Position = vec4(attr0*2.0-1.0, 0.0, 1.0);\n" +
                "   uv = attr0;\n" +
                "}", "varying vec2 uv;\n", "" +
                "void main(){" +
                code +
                "}"
    )

    fun repeat(code: String, times: Int): String {
        return Array(times) { code }.joinToString("\n")
    }

    val size = 512
    val warmup = 50
    val benchmark = 1000

    HiddenOpenGLContext.setSize(size, size)
    HiddenOpenGLContext.createOpenGL()

    val buffer = Framebuffer("", size, size, 1, 1, true, Framebuffer.DepthBufferType.NONE)

    println("Power,Multiplications,GFlops-multiplication,GFlops-floats,GFlops-ints,GFlops-power,Speedup")

    useFrame(buffer, Renderer.colorRenderer) {
        RenderState.blendMode.use(me.anno.gpu.blending.BlendMode.ADD) {
            for (power in 2 until 100) {
                // to reduce the overhead of other stuff
                val repeats = 100
                val init = "float x1 = dot(uv, vec2(1.0)),x2,x4,x8,x16,x32,x64;\n"
                val end = "gl_FragColor = vec4(x1,x1,x1,x1);\n"
                val manualCode = StringBuilder()
                for (bit in 1 until 32) {
                    val p = 1.shl(bit)
                    val h = 1.shl(bit - 1)
                    if (power == p) {
                        manualCode.append("x1=x$h*x$h;")
                        break
                    } else if (power > p) {
                        manualCode.append("x$p=x$h*x$h;")
                    } else break
                }
                if (power.and(power - 1) != 0) {
                    // not a power of two, so the result isn't finished yet
                    manualCode.append("x1=")
                    var first = true
                    for (bit in 0 until 32) {
                        val p = 1.shl(bit)
                        if (power.and(p) != 0) {
                            if (!first) {
                                manualCode.append('*')
                            } else first = false
                            manualCode.append("x$p")
                        }
                    }
                    manualCode.append(";\n")
                }
                val multiplications = manualCode.count { it == '*' }
                // println("$power: $manualCode")
                val shaders = listOf(
                    // manually optimized
                    createShader(init + repeat(manualCode.toString(), repeats) + end),
                    // can be optimized
                    createShader(init + repeat("x1=pow(x1,$power.0);", repeats) + end),
                    // can be optimized, int as power
                    createShader(init + repeat("x1=pow(x1,$power);", repeats) + end),
                    // slightly different, so it can't be optimized
                    createShader(init + repeat("x1=pow(x1,${power}.01);", repeats) + end),
                )
                for (shader in shaders) {
                    shader.use()
                }
                val pixels = ByteBuffer.allocateDirect(4)
                Frame.bind()
                glClearColor(0f, 0f, 0f, 1f)
                glClear(GL_COLOR_BUFFER_BIT or GL_DEPTH_BUFFER_BIT)
                for (i in 0 until warmup) {
                    for (shader in shaders) {
                        shader.use()
                        flat01.draw(shader)
                    }
                }
                val flops = DoubleArray(shaders.size)
                val avg = 10 // for more stability between runs
                for (j in 0 until avg) {
                    for (index in shaders.indices) {
                        val shader = shaders[index]
                        GFX.check()
                        val t0 = System.nanoTime()
                        for (i in 0 until benchmark) {
                            shader.use()
                            flat01.draw(shader)
                        }
                        // synchronize
                        glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixels)
                        GFX.check()
                        val t1 = System.nanoTime()
                        // the first one may be an outlier
                        if (j > 0) flops[index] += multiplications * repeats.toDouble() * benchmark.toDouble() * size * size / (t1 - t0)
                        GFX.check()
                    }
                }
                for (i in flops.indices) {
                    flops[i] /= (avg - 1.0)
                }
                println(
                    "" +
                            "$power,$multiplications," +
                            "${flops[0].roundToInt()}," +
                            "${flops[1].roundToInt()}," +
                            "${flops[2].roundToInt()}," +
                            "${flops[3].roundToInt()}," +
                            (flops[0] / flops[3]).f2()
                )
            }
        }
    }
}
Each fragment shader is run over 512² pixels, with 1000 draws per timed pass, averaged over 9 passes, and every pixel evaluates the function 100 times.
I ran this code on my Gigabyte RX 580 8GB and collected the following results:
Power  #Mult  GFlops*  GFlopsFp  GFlopsInt  GFlopsPow  Speedup
    2      1     1246      1429       1447        324     3.84
    3      2     2663      2692       2708        651     4.09
    4      2     2682      2679       2698        650     4.12
    5      3     2766       972        974        973     2.84
    6      3     2785       978        974        976     2.85
    7      4     2830      1295       1303       1299     2.18
    8      3     2783      2792       2809        960     2.90
    9      4     2836      1298       1301       1302     2.18
   10      4     2833      1291       1302       1298     2.18
   11      5     2858      1623       1629       1623     1.76
   12      4     2824      1302       1295       1303     2.17
   13      5     2866      1628       1624       1626     1.76
   14      5     2869      1614       1623       1611     1.78
   15      6     2886      1945       1943       1953     1.48
   16      4     2821      1305       1300       1305     2.16
   17      5     2868      1615       1625       1619     1.77
   18      5     2858      1620       1625       1624     1.76
   19      6     2890      1949       1946       1949     1.48
   20      5     2871      1618       1627       1625     1.77
   21      6     2879      1945       1947       1943     1.48
   22      6     2886      1944       1949       1952     1.48
   23      7     2901      2271       2269       2268     1.28
   24      5     2872      1621       1628       1624     1.77
   25      6     2886      1942       1943       1942     1.49
   26      6     2880      1949       1949       1953     1.47
   27      7     2891      2273       2263       2266     1.28
   28      6     2883      1949       1946       1953     1.48
   29      7     2910      2279       2281       2279     1.28
   30      7     2899      2272       2276       2277     1.27
   31      8     2906      2598       2595       2596     1.12
   32      5     2872      1621       1625       1622     1.77
   33      6     2901      1953       1942       1949     1.49
   34      6     2895      1948       1939       1944     1.49
   35      7     2895      2274       2266       2268     1.28
   36      6     2881      1937       1944       1948     1.48
   37      7     2894      2277       2270       2280     1.27
   38      7     2902      2275       2264       2273     1.28
   39      8     2910      2602       2594       2603     1.12
   40      6     2877      1945       1947       1945     1.48
   41      7     2892      2276       2277       2282     1.27
   42      7     2887      2271       2272       2273     1.27
   43      8     2912      2599       2606       2599     1.12
   44      7     2910      2278       2284       2276     1.28
   45      8     2920      2597       2601       2600     1.12
   46      8     2920      2600       2601       2590     1.13
   47      9     2925      2921       2926       2927     1.00
   48      6     2885      1935       1955       1956     1.47
   49      7     2901      2271       2279       2288     1.27
   50      7     2904      2281       2276       2278     1.27
   51      8     2919      2608       2594       2607     1.12
   52      7     2902      2282       2270       2273     1.28
   53      8     2903      2598       2602       2598     1.12
   54      8     2918      2602       2602       2604     1.12
   55      9     2932      2927       2924       2936     1.00
   56      7     2907      2284       2282       2281     1.27
   57      8     2920      2606       2604       2610     1.12
   58      8     2913      2593       2597       2587     1.13
   59      9     2925      2923       2924       2920     1.00
   60      8     2930      2614       2606       2613     1.12
   61      9     2932      2946       2946       2947     1.00
   62      9     2926      2935       2937       2947     0.99
   63     10     2958      3258       3192       3266     0.91
   64      6     2902      1957       1956       1959     1.48
   65      7     2903      2274       2267       2273     1.28
   66      7     2909      2277       2276       2286     1.27
   67      8     2908      2602       2606       2599     1.12
   68      7     2894      2272       2279       2276     1.27
   69      8     2923      2597       2606       2606     1.12
   70      8     2910      2596       2599       2600     1.12
   71      9     2926      2921       2927       2924     1.00
   72      7     2909      2283       2273       2273     1.28
   73      8     2909      2602       2602       2599     1.12
   74      8     2914      2602       2602       2603     1.12
   75      9     2924      2925       2927       2933     1.00
   76      8     2904      2608       2602       2601     1.12
   77      9     2911      2919       2917       2909     1.00
   78      9     2927      2921       2917       2935     1.00
   79     10     2929      3241       3246       3246     0.90
   80      7     2903      2273       2276       2275     1.28
   81      8     2916      2596       2592       2589     1.13
   82      8     2913      2600       2597       2598     1.12
   83      9     2925      2931       2926       2913     1.00
   84      8     2917      2598       2606       2597     1.12
   85      9     2920      2916       2918       2927     1.00
   86      9     2942      2922       2944       2936     1.00
   87     10     2961      3254       3259       3268     0.91
   88      8     2934      2607       2608       2612     1.12
   89      9     2918      2939       2931       2916     1.00
   90      9     2927      2928       2920       2924     1.00
   91     10     2940      3253       3252       3246     0.91
   92      9     2924      2933       2926       2928     1.00
   93     10     2940      3259       3237       3251     0.90
   94     10     2928      3247       3247       3264     0.90
   95     11     2933      3599       3593       3594     0.82
   96      7     2883      2282       2268       2269     1.27
   97      8     2911      2602       2595       2600     1.12
   98      8     2896      2588       2591       2587     1.12
   99      9     2924      2939       2936       2938     1.00
As you can see, a pow() call takes exactly as long as 9 multiplication instructions. Therefore, rewriting any power that needs fewer than 9 multiplications by hand is faster.
Only the exponents 2, 3, 4, and 8 are optimized by my driver, and the optimization is independent of whether you use the .0 suffix for the exponent.
In the case of exponent = 2, my implementation seems to have lower performance than the driver's. I am not sure why.
The speedup column compares the manual implementation against pow(x, exponent+0.01), which the compiler cannot optimize away.
Because the multiplication count and the speedup align so perfectly, I created a graph to show their relationship. That the two track each other this closely also suggests the benchmark is trustworthy :).
Operating System: Windows 10 Personal
GPU: RX 580 8GB from Gigabyte
Processor: Ryzen 5 2600
Memory: 16 GB DDR4 3200
GPU Driver: 21.6.1 from 17th June 2021
LWJGL: Version 3.2.3 build 13

While this can definitely be hardware/vendor/compiler dependent, advanced mathematical functions like pow() tend to be considerably more expensive than basic operations.
The best approach is of course to try both and benchmark. But if there is a simple replacement for an advanced mathematical function, I don't think you can go very wrong by using it.
If you write pow(x, 3.0), the best you can probably hope for is that the compiler will recognize the special case and expand it. But why take the risk, if the replacement is just as short and easy to read? C/C++ compilers don't always replace pow(x, 2.0) with a simple multiplication, so I wouldn't count on all GLSL compilers to do so either.
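For illustration, here is the kind of manual replacement the table above rewards, shown for the exponent-5 case from the question (a minimal GLSL sketch of square-and-multiply, given some float x; three multiplications instead of one pow call):
// x^5 with 3 multiplications instead of pow(x, 5.0)
float x2 = x * x;   // x^2
float x4 = x2 * x2; // x^4
float x5 = x4 * x;  // x^5
The same pattern generalizes: square repeatedly, then multiply together the powers of two that appear in the exponent's binary representation, which is exactly what the benchmark's generated "manual" shader does.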

Related

Get average area of polygon per ID from data frame

I have a data frame with over 4000 points. Each point has 2 ID columns that identify a hierarchical grouping system; let's call them l_1 and l_2. l_1 indicates points that are grouped together. I want to make a convex hull for each of these groups and then measure the area of each convex hull polygon.
Then, I want to estimate the average convex hull area based on the second grouping ID, named l_2. Ideally the outcome would be a data frame with one row per unique l_2 identifier holding the average convex polygon area.
So far I am trying to create a list of data frames based on the l_1 column. Something like:
areas <- lapply(df$l_1, function(x){
  sfObs <- df %>% filter(l_1 == x) %>%
    st_as_sf(., coords = c('x', 'y'), crs = 4326)
  areas <- st_convex_hull(st_union(chk)) %>% st_area()
  return(areas)
})
But I only get empty polygons so far. It works with a single value for x but when I run it as a list of values it spits out empty polygons.
After that, I would average the convex polygon areas using group_by and summarise(mean()), employing the l_2 ID column, as follows:
df.areas <- do.call(areas, 'rbind') %>% cbind(unique(df$l_1), .) %>%
  left_join(., df, by = l_1) %>%
  group_by(l_2) %>%
  summarise(aveArea = mean(area))
But as I can't get past the first bit, I am stuck. I would be grateful for any ideas on how to achieve the end goal, but please only using sf functions.
A subset of the data:
l_2 l_1 x y
1 17 149 151.8930 -23.42907
2 17 149 151.8815 -23.41670
3 17 149 151.8805 -23.42031
4 17 149 151.8532 -23.41637
5 17 149 151.8284 -23.41455
6 17 149 151.8212 -23.40360
7 17 149 151.8057 -23.39490
8 17 149 151.7897 -23.39090
9 17 149 151.8055 -23.40893
10 17 149 151.8041 -23.40735
11 17 149 151.7980 -23.41180
12 17 149 151.7958 -23.41051
13 17 149 151.8015 -23.40578
14 17 149 151.8023 -23.40141
15 17 149 151.7873 -23.39065
16 17 149 151.7690 -23.39123
17 17 149 151.7663 -23.38577
18 17 149 151.7654 -23.39139
19 17 151 151.8086 -23.44059
20 17 151 151.7972 -23.43462
21 17 151 151.8080 -23.43974
22 17 153 151.7794 -23.36882
23 17 153 151.7792 -23.34290
24 17 153 151.7802 -23.34012
25 17 157 151.7664 -23.37117
26 17 157 151.7783 -23.37342
27 17 157 151.7962 -23.36544
28 17 157 151.8079 -23.35681
29 17 157 151.8006 -23.35412
30 17 157 151.8030 -23.35334
31 17 157 151.8030 -23.36052
32 17 157 151.8075 -23.36844
33 17 157 151.8057 -23.37128
34 17 157 151.7990 -23.37499
35 17 157 151.7937 -23.37959
36 17 159 151.8643 -23.42937
37 17 159 151.8726 -23.41774
38 17 159 151.8905 -23.42103
39 17 159 151.9041 -23.43649
40 17 161 151.8440 -23.38699
41 17 161 151.8498 -23.37978
42 17 161 151.8499 -23.36631
43 17 161 151.8344 -23.33939
44 17 161 151.8332 -23.33175
45 17 161 151.8370 -23.33839
46 17 161 151.8384 -23.33640
47 17 161 151.8440 -23.33435
48 17 161 151.8317 -23.34718
49 17 161 151.8279 -23.34407
50 17 161 151.8310 -23.34102
51 17 161 151.8337 -23.34140
52 17 163 151.8272 -23.36147
53 17 163 151.8161 -23.35445
54 17 163 151.8159 -23.34914
55 17 163 151.8134 -23.33415
56 6 649 151.9532 -23.42466
57 6 649 151.9680 -23.42602
58 6 649 151.9744 -23.42791
59 6 649 151.9925 -23.42612
60 6 649 152.0139 -23.42027
61 6 649 152.0235 -23.41462
62 6 649 152.0243 -23.41289
63 6 649 152.0236 -23.40959
64 6 649 152.0268 -23.40911
65 6 649 152.0276 -23.40897
66 6 649 152.0259 -23.40767
67 6 651 151.8505 -23.44435
68 6 651 151.8516 -23.44453
69 6 651 151.8400 -23.44005
70 6 651 151.8260 -23.44468
71 6 651 151.8196 -23.44625
72 6 651 151.8213 -23.44360
73 6 651 151.8111 -23.42271
74 6 651 151.8220 -23.40930
75 6 651 151.8160 -23.42438
76 6 651 151.8115 -23.43400
77 6 651 151.8269 -23.44965
78 6 651 151.8485 -23.45157
79 6 651 151.8471 -23.45342
80 6 651 151.8506 -23.45705
81 6 651 151.8489 -23.45228
82 6 651 151.8562 -23.45304
83 6 651 151.8552 -23.45212
84 6 651 151.8579 -23.44707
85 6 651 151.8644 -23.44840
86 6 651 151.8667 -23.44603
87 6 651 151.8775 -23.44708
88 6 653 151.9705 -23.42842
89 6 653 151.9733 -23.42767
90 6 655 151.9024 -23.41702
91 6 655 151.9138 -23.40610
92 6 655 151.9095 -23.40876
93 6 655 151.9015 -23.39602
94 6 655 151.9252 -23.37706
95 6 655 151.9308 -23.37199
96 6 655 151.9307 -23.36946
97 6 655 151.9805 -23.39567
98 6 655 152.0065 -23.41577
99 6 655 152.0196 -23.41305
100 6 655 152.0211 -23.41244
101 6 655 152.0113 -23.41101
102 6 655 152.0142 -23.40985
103 6 655 152.0150 -23.40754
104 6 655 152.0041 -23.40394
105 8 669 151.8945 -23.64410
106 8 669 151.8890 -23.66261
107 8 669 151.9000 -23.66387
108 8 669 151.9067 -23.66830
109 8 669 151.9094 -23.68123
110 8 669 151.8967 -23.69244
111 8 669 151.9107 -23.69545
112 8 669 151.9192 -23.69091
113 8 669 151.9273 -23.68480
114 8 669 151.9409 -23.66136
115 8 669 151.9361 -23.66283
116 8 669 151.9396 -23.66090
117 8 669 151.9432 -23.65804
118 8 669 151.9488 -23.65748
119 8 669 151.9521 -23.65517
120 8 669 151.9595 -23.65920
121 8 669 151.9666 -23.66185
122 8 669 151.9724 -23.65896
123 8 669 151.9802 -23.65798
124 8 669 151.9735 -23.63510
125 8 669 151.9558 -23.61360
126 8 669 151.9589 -23.61100
127 8 669 151.9623 -23.60884
128 8 669 151.9645 -23.61030
129 8 669 151.9685 -23.61122
130 8 669 151.9681 -23.60686
131 8 669 151.9612 -23.60467
132 8 671 151.9500 -23.47789
133 8 671 151.9495 -23.47786
134 8 671 151.9456 -23.47541
135 8 671 151.9448 -23.47416
136 8 671 151.9606 -23.48151
137 8 671 151.9637 -23.47959
138 8 671 151.9766 -23.47657
139 8 673 151.9711 -23.53105
140 8 673 151.9903 -23.51980
141 8 673 152.0149 -23.52661
142 8 673 152.0172 -23.52828
143 8 673 152.0168 -23.53076
144 8 673 152.0146 -23.53149
145 8 673 152.0108 -23.53228
146 8 673 152.0114 -23.53236
147 8 673 152.0145 -23.53364
148 8 675 152.0148 -23.47530
149 8 675 152.0200 -23.46649
150 8 675 152.0185 -23.46562
151 8 675 152.0181 -23.44782
152 8 675 152.0190 -23.43633
153 8 675 152.0049 -23.41639
154 8 675 152.0067 -23.40699
155 8 675 152.0127 -23.41182
156 8 675 152.0138 -23.41197
157 8 675 152.0136 -23.40980
158 8 675 152.0183 -23.40843
159 8 675 152.0190 -23.40862
160 8 677 151.8494 -23.55435
161 8 677 151.8476 -23.54912
162 8 679 151.8122 -23.62238
163 8 679 151.8100 -23.61953
164 8 679 151.8074 -23.61739
165 8 679 151.8040 -23.61299
166 8 679 151.8101 -23.61499
167 8 679 151.8097 -23.61255
168 8 679 151.8049 -23.61203
169 8 679 151.8048 -23.60668
170 8 679 151.8048 -23.60774
171 8 679 151.8209 -23.61589
172 8 679 151.8223 -23.60883
173 8 679 151.8217 -23.61741
174 8 679 151.8229 -23.61998
175 8 679 151.8241 -23.62179
176 8 679 151.8394 -23.62616
177 8 679 151.8384 -23.62278
178 8 681 151.8474 -23.62581
179 8 681 151.8470 -23.62196
180 8 681 151.8505 -23.62026
181 8 681 151.8511 -23.61996
182 8 681 151.8506 -23.62811
183 8 681 151.8394 -23.65246
184 8 681 151.8179 -23.65648
185 8 681 151.8081 -23.65494
186 8 681 151.8008 -23.65538
187 8 681 151.8032 -23.64207
188 8 681 151.8129 -23.64435
189 8 681 151.8141 -23.64182
190 8 681 151.8167 -23.63823
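This question carries no answer in this dump, so here is a minimal sf/dplyr sketch of one way to reach the end goal (my own suggestion, untested against the full data; it assumes df holds the columns l_2, l_1, x, y shown above):
library(sf)
library(dplyr)

# one geometry per l_1 group: summarise() unions the points of each group
hulls <- df %>%
  st_as_sf(coords = c("x", "y"), crs = 4326) %>%
  group_by(l_2, l_1) %>%
  summarise(do_union = TRUE) %>%
  st_convex_hull() %>%                 # convex hull of each grouped geometry
  mutate(area = st_area(geometry))     # geodetic area (m^2 for EPSG:4326)

# average hull area per unique l_2 identifier
hulls %>%
  st_drop_geometry() %>%
  group_by(l_2) %>%
  summarise(aveArea = mean(area))
Note that groups with fewer than 3 points (e.g. l_1 == 653, which has only 2) yield degenerate hulls with zero area.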

To store values in Array or vector of Objects reading from file c++98

There are 6 column entries in a file, where the columns specify values for (days, hours, temperature, relative humidity, wind speed, global horizontal solar radiation) respectively. How can we store these values in an array or vector of objects? Please help.
1 1 13.7 58 2.7 0
1 2 13.5 64 1.4 0
1 3 13 70 0 0
1 4 12.2 75 0.5 0
1 5 11.4 80 1 0
1 6 10.6 85 1.5 0
1 7 11.1 80 1 0
1 8 11.5 78 0.5 13
1 9 12 76 0 150
1 10 15.1 76 1 355
1 11 18.3 73 2.1 532
1 12 21.4 70 3.1 652
1 13 21.9 62 2.9 706
1 14 22.5 56 2.8 686
1 15 23 49 2.6 593
1 16 22.6 50 2.4 434
1 17 22.2 52 2.3 234
1 18 21.8 53 2.1 45
1 19 19.9 57 1.4 0
1 20 17.9 60 0.7 0
1 21 16 63 0 0
1 22 15.7 60 1.2 0
1 23 15.5 56 2.4 0
1 24 15.2 53 3.6 0
2 1 14.1 58 2.4 0
2 2 13.1 63 1.2 0
2 3 12 69 0 0
2 4 11.1 74 0 0
2 5 10.1 79 0 0
2 6 9.2 84 0 0
2 7 9.9 79 0.3 0
2 8 10.7 75 0.7 13
2 9 11.4 71 1 150
2 10 13.5 60 1.3 358
2 11 15.6 51 1.7 539
2 12 17.7 43 2.1 664
2 13 19.8 37 2.4 718
2 14 21.9 31 2.7 697
2 15 24 26 3.1 603
2 16 23.7 27 2.8 443
2 17 23.3 28 2.4 240
2 18 23 29 2.1 47
2 19 21.1 35 1.9 0
2 20 19.1 42 1.7 0
2 21 17.2 50 1.5 0
2 22 16.1 53 1.5 0
2 23 15.1 57 1.5 0
2 24 14 61 1.5 0
Use std::getline to read each line, and std::stringstream to extract each value from that line. Note that the file has six columns, so the struct needs a temperature field as well:
#include <iostream>
#include <string>
#include <fstream>
#include <sstream>
#include <vector>

// one record per line: day, hour, temperature, relative humidity,
// wind speed, global horizontal solar radiation
struct Data {
    unsigned int days, hours;
    float temperature, humidity, windSpeed, radiation;
};

int main()
{
    std::vector<Data> data;
    std::ifstream file("yourfile.txt");
    for (std::string line; std::getline(file, line); )
    {
        Data d;
        std::stringstream ss(line);
        ss >> d.days >> d.hours >> d.temperature >> d.humidity
           >> d.windSpeed >> d.radiation;
        data.push_back(d);
    }
}
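To sanity-check the parse, you could print the first record afterwards (my own follow-up, still C++98-compatible; it assumes at least one line was read):
// e.g. at the end of main(), after the read loop:
if (!data.empty())
    std::cout << "day " << data[0].days << ", hour " << data[0].hours
              << ": " << data[0].temperature << " C\n";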

How to get Current Row of a CSV File

Trying to skip i rows of a data file (then process j rows). There are plenty of answers about the length of a csv file, but I've had no success with count or .line_num.
How is the current line number accessed in csv.reader?
import csv

def csv_reader(df):
    i = 90
    with open(df, 'r') as csvfile:
        for line in range(0, i):
            next(csvfile)
        for line in csv.reader(csvfile, delimiter=' ', skipinitialspace = True):
            print(csv.reader.line_num) # Invalid line
Sample data:
Last Quarter
Visit Astronomy
Daily Weather History & Observations
2018 Temp. (°C) Dew Point (°C) Humidity (%) Sea Level Press. (hPa) Visibility (km) Wind (km/h) Precip. (mm) Events
Mar high avg low high avg low high avg low high avg low high avg low high avg high sum
1 27 19 12 13 9 4 82 49 21 1016 1012 1007 10 10 10 24 11 - 0.00
2 25 20 14 14 12 9 82 61 32 1017 1014 1010 10 10 10 21 10 - 0.00
3 31 22 14 15 13 7 94 59 13 1014 1011 1007 10 10 10 27 8 - 0.00
4 30 21 13 15 13 6 82 59 13 1016 1012 1009 10 10 10 34 11 - 0.00
5 24 19 15 16 13 11 82 71 46 1022 1016 1013 10 10 10 35 13 - 0.00 Rain
6 20 14 9 12 9 6 82 60 31 1028 1024 1021 10 10 10 32 19 47 0.00
7 23 16 9 13 10 7 100 71 29 1029 1027 1024 10 10 5 29 11 37 0.00 Rain
Other random data follows
You can access the line number using enumerate:
for index, line in enumerate(csv.reader(csvfile, delimiter=' ', skipinitialspace=True)):
    print('Index %s' % str(index + 1))  # enumerate starts with 0
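For the full skip-i-then-process-j pattern, itertools.islice keeps it compact (my own addition, matching the space-delimited sample above). Note that line_num works when read from the reader instance, not from the csv.reader class itself:
import csv
from itertools import islice

def csv_reader(path, skip=90, take=10):
    with open(path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ', skipinitialspace=True)
        # islice drops the first `skip` rows, then yields the next `take` rows
        for row in islice(reader, skip, skip + take):
            print(reader.line_num, row)  # source lines read so far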

Sequential READ or WRITE not allowed after EOF marker

I have this code:
      SUBROUTINE FNDKEY
     1( FOUND ,IWBEG ,IWEND ,KEYWRD ,INLINE ,
     2  NFILE ,NWRD )
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      LOGICAL FOUND
      CHARACTER*80 INLINE
      CHARACTER*(*) KEYWRD
      DIMENSION
     1  IWBEG(40), IWEND(40)
C***********************************************************************
C FINDS AND READS A LINE CONTAINING A SPECIFIED KEYWORD FROM A FILE.
C THIS ROUTINE SEARCHES FOR A GIVEN KEYWORD POSITIONED AS THE FIRST
C WORD OF A LINE IN A FILE.
C IF THE GIVEN KEYWORD IS FOUND THEN THE CORRESPONDING LINE IS READ AND
C RETURNED TOGETHER WITH THE NUMBER OF WORDS IN THE LINE AND TWO INTEGER
C ARRAYS CONTAINING THE POSITION OF THE BEGINNING AND END OF EACH WORD.
C***********************************************************************
 1000 FORMAT(A80)
C
      FOUND=.TRUE.
      IEND=0
   10 READ(NFILE,1000,END=20)INLINE
      NWRD=NWORD(INLINE,IWBEG,IWEND)
      IF(NWRD.NE.0)THEN
        IF(INLINE(IWBEG(1):IWEND(1)).EQ.KEYWRD)THEN
          GOTO 999
        ENDIF
      ENDIF
      GOTO 10
   20 IF(IEND.EQ.0)THEN
        IEND=1
        REWIND NFILE
        GOTO 10
      ELSE
        FOUND=.FALSE.
      ENDIF
  999 RETURN
      END
And the following file named "2.dat" that I am trying to read:
TITLE
Example 7.5.3 - Simply supported uniformly loaded circular plate
ANALYSIS_TYPE 3 (Axisymmetric)
AXIS_OF_SYMMETRY Y
LARGE_STRAIN_FORMULATION OFF
SOLUTION_ALGORITHM 2
ELEMENT_GROUPS 1
1 1 1
ELEMENT_TYPES 1
1 QUAD_8
4 GP
ELEMENTS 10
1 1 1 19 11 20 16 21 13 22
2 1 13 21 16 23 10 24 2 25
3 1 3 26 18 27 17 28 4 29
4 1 18 30 7 31 12 32 17 27
5 1 3 33 5 34 14 35 18 26
6 1 18 35 14 36 6 37 7 30
7 1 5 38 8 39 15 40 14 34
8 1 14 40 15 41 9 42 6 36
9 1 10 23 16 43 17 32 12 44
10 1 16 20 11 45 4 28 17 43
NODE_COORDINATES 45 CARTESIAN
1 0.0000000000e+00 0.0000000000e+00
2 0.0000000000e+00 1.0000000000e+00
3 6.0000000000e+00 0.0000000000e+00
4 4.0000000000e+00 0.0000000000e+00
5 8.0000000000e+00 0.0000000000e+00
6 8.0000000000e+00 1.0000000000e+00
7 6.0000000000e+00 1.0000000000e+00
8 1.0000000000e+01 0.0000000000e+00
9 1.0000000000e+01 1.0000000000e+00
10 2.0000000000e+00 1.0000000000e+00
11 2.0000000000e+00 0.0000000000e+00
12 4.0000000000e+00 1.0000000000e+00
13 0.0000000000e+00 5.0000000000e-01
14 8.0000000000e+00 5.0000000000e-01
15 1.0000000000e+01 5.0000000000e-01
16 2.0000000000e+00 5.0000000000e-01
17 4.0000000000e+00 5.0000000000e-01
18 6.0000000000e+00 5.0000000000e-01
19 1.0000000000e+00 0.0000000000e+00
20 2.0000000000e+00 2.5000000000e-01
21 1.0000000000e+00 5.0000000000e-01
22 0.0000000000e+00 2.5000000000e-01
23 2.0000000000e+00 7.5000000000e-01
24 1.0000000000e+00 1.0000000000e+00
25 0.0000000000e+00 7.5000000000e-01
26 6.0000000000e+00 2.5000000000e-01
27 5.0000000000e+00 5.0000000000e-01
28 4.0000000000e+00 2.5000000000e-01
29 5.0000000000e+00 0.0000000000e+00
30 6.0000000000e+00 7.5000000000e-01
31 5.0000000000e+00 1.0000000000e+00
32 4.0000000000e+00 7.5000000000e-01
33 7.0000000000e+00 0.0000000000e+00
34 8.0000000000e+00 2.5000000000e-01
35 7.0000000000e+00 5.0000000000e-01
36 8.0000000000e+00 7.5000000000e-01
37 7.0000000000e+00 1.0000000000e+00
38 9.0000000000e+00 0.0000000000e+00
39 1.0000000000e+01 2.5000000000e-01
40 9.0000000000e+00 5.0000000000e-01
41 1.0000000000e+01 7.5000000000e-01
42 9.0000000000e+00 1.0000000000e+00
43 3.0000000000e+00 5.0000000000e-01
44 3.0000000000e+00 1.0000000000e+00
45 3.0000000000e+00 0.0000000000e+00
NODES_WITH_PRESCRIBED_DISPLACEMENTS 6
1 10 0.000 0.000 0.000
2 10 0.000 0.000 0.000
8 01 0.000 0.000 0.000
13 10 0.000 0.000 0.000
22 10 0.000 0.000 0.000
25 10 0.000 0.000 0.000
MATERIALS 1
1 VON_MISES
0.0
1.E+07 0.240
2
0.000 16000.0
1.000 16000.0
LOADINGS EDGE
EDGE_LOADS 5
2 3 10 24 2
1.000 1.000 1.000 0.000 0.000 0.000
4 3 7 31 12
1.000 1.000 1.000 0.000 0.000 0.000
6 3 6 37 7
1.000 1.000 1.000 0.000 0.000 0.000
8 3 9 42 6
1.000 1.000 1.000 0.000 0.000 0.000
9 3 10 12 44
1.000 1.000 1.000 0.000 0.000 0.000
*
* Monotonic loading to collapse
*
INCREMENTS 12
100.0 0.10000E-06 11 1 1 0 1 0
100.0 0.10000E-06 11 1 1 0 1 0
20.0 0.10000E-06 11 1 1 0 1 0
10.0 0.10000E-06 11 1 1 0 0 0
10.0 0.10000E-06 11 1 1 0 1 0
10.0 0.10000E-06 11 1 1 0 0 0
5.0 0.10000E-06 11 1 1 1 1 0
2.0 0.10000E-06 11 1 1 0 0 0
2.0 0.10000E-06 11 1 1 0 0 0
0.5 0.10000E-06 11 1 1 1 1 0
0.25 0.10000E-06 11 1 1 0 0 0
0.02 0.10000E-06 11 1 1 0 0 0
And I am getting the following error:
At line 22 of file GENERAL/fndkey.f (unit = 15, file = './2.dat')
Fortran runtime error: Sequential READ or WRITE not allowed after EOF marker, possibly use REWIND or BACKSPACE
The following file is the one that calls FNDKEY. When it calls FNDKEY, it passes the string "RESTART" to KEYWRD.
      SUBROUTINE RSTCHK( RSTINP ,RSTRT )
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      LOGICAL RSTRT
      CHARACTER*256 RSTINP
C
      LOGICAL AVAIL,FOUND
      CHARACTER*80 INLINE
      DIMENSION IWBEG(40),IWEND(40)
C***********************************************************************
C CHECKS WHETHER MAIN DATA IS TO BE READ FROM INPUT RE-START FILE
C AND SETS INPUT RE-START FILE NAME IF REQUIRED
C***********************************************************************
 1000 FORMAT(////,
     1' Main input data read from re-start file'/
     2' ======================================='///
     3' Input re-start file name ------> ',A)
C
C Checks whether the input data file contains the keyword RESTART
C
      CALL FNDKEY
     1( FOUND ,IWBEG ,IWEND ,'RESTART',
     2  INLINE ,15 ,NWRD )
      IF(FOUND)THEN
C sets re-start flag and name of input re-start file
        RSTRT=.TRUE.
        RSTINP=INLINE(IWBEG(2):IWEND(2))//'.rst'
        WRITE(16,1000)INLINE(IWBEG(2):IWEND(2))//'.rst'
C checks existence of the input re-start file
        INQUIRE(FILE=RSTINP,EXIST=AVAIL)
        IF(.NOT.AVAIL)CALL ERRPRT('ED0096')
      ELSE
        RSTRT=.FALSE.
      ENDIF
C
      RETURN
      END
I solved the problem by adding the command BACKSPACE(NFILE) above the RETURN:
      SUBROUTINE FNDKEY
     1( FOUND ,IWBEG ,IWEND ,KEYWRD ,INLINE ,
     2  NFILE ,NWRD )
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      LOGICAL FOUND
      CHARACTER*80 INLINE
      CHARACTER*(*) KEYWRD
      DIMENSION
     1  IWBEG(40), IWEND(40)
C***********************************************************************
C FINDS AND READS A LINE CONTAINING A SPECIFIED KEYWORD FROM A FILE.
C THIS ROUTINE SEARCHES FOR A GIVEN KEYWORD POSITIONED AS THE FIRST
C WORD OF A LINE IN A FILE.
C IF THE GIVEN KEYWORD IS FOUND THEN THE CORRESPONDING LINE IS READ AND
C RETURNED TOGETHER WITH THE NUMBER OF WORDS IN THE LINE AND TWO INTEGER
C ARRAYS CONTAINING THE POSITION OF THE BEGINNING AND END OF EACH WORD.
C***********************************************************************
 1000 FORMAT(A80)
C
      FOUND=.TRUE.
      IEND=0
   10 READ(NFILE,1000,END=20)INLINE
      NWRD=NWORD(INLINE,IWBEG,IWEND)
      PRINT *,KEYWRD
      IF(NWRD.NE.0)THEN
        IF(INLINE(IWBEG(1):IWEND(1)).EQ.KEYWRD)THEN
          GOTO 999
        ENDIF
      ENDIF
      GOTO 10
   20 IF(IEND.EQ.0)THEN
        IEND=1
        REWIND NFILE
        GOTO 10
      ELSE
        FOUND=.FALSE.
      ENDIF
      BACKSPACE(NFILE)
  999 RETURN
      END
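The underlying rule, as a minimal standalone sketch (my own illustration; the file name is hypothetical): once a sequential READ takes the END= branch, the unit is positioned after the endfile record, and it must be repositioned with REWIND or BACKSPACE before the next sequential READ on that unit.
      PROGRAM EOFDEM
C READ TO END OF FILE, THEN REPOSITION BEFORE READING AGAIN
      CHARACTER*80 LINE
      OPEN(15,FILE='demo.dat',STATUS='OLD')
   10 READ(15,'(A80)',END=20)LINE
      GOTO 10
C WITHOUT THE REWIND (OR A BACKSPACE) THE READ BELOW WOULD FAIL WITH
C "SEQUENTIAL READ OR WRITE NOT ALLOWED AFTER EOF MARKER"
   20 REWIND 15
      READ(15,'(A80)')LINE
      PRINT *,LINE
      END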

Extracting specific lines of data from a log file

I'm looking to extract and print a specific line from a table I have in a long log file. It looks something like this:
******************************************************************************
XSCALE (VERSION July 4, 2012) 4-Jun-2013
******************************************************************************
Author: Wolfgang Kabsch
Copy licensed until 30-Jun-2013 to
academic users for non-commercial applications
No redistribution.
******************************************************************************
CONTROL CARDS
******************************************************************************
MAXIMUM_NUMBER_OF_PROCESSORS=16
RESOLUTION_SHELLS= 20 10 6 4 3 2.5 2.0 1.9 1.8 1.7 1.6 1.5 1.4 1.3 1.2 1.1 1.0 0.9 0.8
MINIMUM_I/SIGMA=4.0
OUTPUT_FILE=fae-ip.ahkl
INPUT_FILE= /dls/sci-scratch/Sam/FC59251/fr6_1/XDS_ASCII.HKL
THE DATA COLLECTION STATISTICS REPORTED BELOW ASSUMES:
SPACE_GROUP_NUMBER= 97
UNIT_CELL_CONSTANTS= 128.28 128.28 181.47 90.000 90.000 90.000
***** 16 EQUIVALENT POSITIONS IN SPACE GROUP # 97 *****
If x',y',z' is an equivalent position to x,y,z, then
x'=x*ML(1)+y*ML( 2)+z*ML( 3)+ML( 4)/12.0
y'=x*ML(5)+y*ML( 6)+z*ML( 7)+ML( 8)/12.0
z'=x*ML(9)+y*ML(10)+z*ML(11)+ML(12)/12.0
# 1 2 3 4 5 6 7 8 9 10 11 12
1 1 0 0 0 0 1 0 0 0 0 1 0
2 -1 0 0 0 0 -1 0 0 0 0 1 0
3 -1 0 0 0 0 1 0 0 0 0 -1 0
4 1 0 0 0 0 -1 0 0 0 0 -1 0
5 0 1 0 0 1 0 0 0 0 0 -1 0
6 0 -1 0 0 -1 0 0 0 0 0 -1 0
7 0 -1 0 0 1 0 0 0 0 0 1 0
8 0 1 0 0 -1 0 0 0 0 0 1 0
9 1 0 0 6 0 1 0 6 0 0 1 6
10 -1 0 0 6 0 -1 0 6 0 0 1 6
11 -1 0 0 6 0 1 0 6 0 0 -1 6
12 1 0 0 6 0 -1 0 6 0 0 -1 6
13 0 1 0 6 1 0 0 6 0 0 -1 6
14 0 -1 0 6 -1 0 0 6 0 0 -1 6
15 0 -1 0 6 1 0 0 6 0 0 1 6
16 0 1 0 6 -1 0 0 6 0 0 1 6
ALL DATA SETS WILL BE SCALED TO /dls/sci-scratch/Sam/FC59251/fr6_1/XDS_ASCII.HKL
******************************************************************************
READING INPUT REFLECTION DATA FILES
******************************************************************************
DATA MEAN REFLECTIONS INPUT FILE NAME
SET# INTENSITY ACCEPTED REJECTED
1 0.1358E+03 1579957 0 /dls/sci-scratch/Sam/FC59251/fr6_1/XDS_ASCII.HKL
******************************************************************************
CORRECTION FACTORS AS FUNCTION OF IMAGE NUMBER & RESOLUTION
******************************************************************************
RECIPROCAL CORRECTION FACTORS FOR INPUT DATA SETS MERGED TO
OUTPUT FILE: fae-ip.ahkl
THE CALCULATIONS ASSUME FRIEDEL'S_LAW= TRUE
TOTAL NUMBER OF CORRECTION FACTORS DEFINED 720
DEGREES OF FREEDOM OF CHI^2 FIT 357222.9
CHI^2-VALUE OF FIT OF CORRECTION FACTORS 1.024
NUMBER OF CYCLES CARRIED OUT 4
CORRECTION FACTORS for visual inspection by XDS-Viewer DECAY_001.cbf
XMIN= 0.6 XMAX= 1799.3 NXBIN= 36
YMIN= 0.00049 YMAX= 0.44483 NYBIN= 20
NUMBER OF REFLECTIONS USED FOR DETERMINING CORRECTION FACTORS 396046
******************************************************************************
CORRECTION FACTORS AS FUNCTION OF X (fast) & Y(slow) IN THE DETECTOR PLANE
******************************************************************************
RECIPROCAL CORRECTION FACTORS FOR INPUT DATA SETS MERGED TO
OUTPUT FILE: fae-ip.ahkl
THE CALCULATIONS ASSUME FRIEDEL'S_LAW= TRUE
TOTAL NUMBER OF CORRECTION FACTORS DEFINED 7921
DEGREES OF FREEDOM OF CHI^2 FIT 356720.6
CHI^2-VALUE OF FIT OF CORRECTION FACTORS 1.023
NUMBER OF CYCLES CARRIED OUT 3
CORRECTION FACTORS for visual inspection by XDS-Viewer MODPIX_001.cbf
XMIN= 5.4 XMAX= 2457.6 NXBIN= 89
YMIN= 40.0 YMAX= 2516.7 NYBIN= 89
NUMBER OF REFLECTIONS USED FOR DETERMINING CORRECTION FACTORS 396046
******************************************************************************
CORRECTION FACTORS AS FUNCTION OF IMAGE NUMBER & DETECTOR SURFACE POSITION
******************************************************************************
RECIPROCAL CORRECTION FACTORS FOR INPUT DATA SETS MERGED TO
OUTPUT FILE: fae-ip.ahkl
THE CALCULATIONS ASSUME FRIEDEL'S_LAW= TRUE
TOTAL NUMBER OF CORRECTION FACTORS DEFINED 468
DEGREES OF FREEDOM OF CHI^2 FIT 357286.9
CHI^2-VALUE OF FIT OF CORRECTION FACTORS 1.022
NUMBER OF CYCLES CARRIED OUT 3
CORRECTION FACTORS for visual inspection by XDS-Viewer ABSORP_001.cbf
XMIN= 0.6 XMAX= 1799.3 NXBIN= 36
DETECTOR_SURFACE_POSITION= 1232 1278
DETECTOR_SURFACE_POSITION= 1648 1699
DETECTOR_SURFACE_POSITION= 815 1699
DETECTOR_SURFACE_POSITION= 815 858
DETECTOR_SURFACE_POSITION= 1648 858
DETECTOR_SURFACE_POSITION= 2174 1673
DETECTOR_SURFACE_POSITION= 1622 2230
DETECTOR_SURFACE_POSITION= 841 2230
DETECTOR_SURFACE_POSITION= 289 1673
DETECTOR_SURFACE_POSITION= 289 884
DETECTOR_SURFACE_POSITION= 841 326
DETECTOR_SURFACE_POSITION= 1622 326
DETECTOR_SURFACE_POSITION= 2174 884
NUMBER OF REFLECTIONS USED FOR DETERMINING CORRECTION FACTORS 396046
******************************************************************************
CORRECTION PARAMETERS FOR THE STANDARD ERROR OF REFLECTION INTENSITIES
******************************************************************************
The variance v0(I) of the intensity I obtained from counting statistics is
replaced by v(I)=a*(v0(I)+b*I^2). The model parameters a, b are chosen to
minimize the discrepancies between v(I) and the variance estimated from
sample statistics of symmetry related reflections. This model implicates
an asymptotic limit ISa=1/SQRT(a*b) for the highest I/Sigma(I) that the
experimental setup can produce (Diederichs (2010) Acta Cryst D66, 733-740).
Often the value of ISa is reduced from the initial value ISa0 due to systematic
errors showing up by comparison with other data sets in the scaling procedure.
(ISa=ISa0=-1 if v0 is unknown for a data set.)
a b ISa ISa0 INPUT DATA SET
1.086E+00 1.420E-03 25.46 29.00 /dls/sci-scratch/Sam/FC59251/fr6_1/XDS_ASCII.HKL
FACTOR TO PLACE ALL DATA SETS TO AN APPROXIMATE ABSOLUTE SCALE 0.4178E+04
(ASSUMING A PROTEIN WITH 50% SOLVENT)
******************************************************************************
STATISTICS OF SCALED OUTPUT DATA SET : fae-ip.ahkl
FILE TYPE: XDS_ASCII MERGE=FALSE FRIEDEL'S_LAW=TRUE
186 OUT OF 1579957 REFLECTIONS REJECTED
1579771 REFLECTIONS ON OUTPUT FILE
******************************************************************************
DEFINITIONS:
R-FACTOR
observed = (SUM(ABS(I(h,i)-I(h))))/(SUM(I(h,i)))
expected = expected R-FACTOR derived from Sigma(I)
COMPARED = number of reflections used for calculating R-FACTOR
I/SIGMA = mean of intensity/Sigma(I) of unique reflections
(after merging symmetry-related observations)
Sigma(I) = standard deviation of reflection intensity I
estimated from sample statistics
R-meas = redundancy independent R-factor (intensities)
Diederichs & Karplus (1997), Nature Struct. Biol. 4, 269-275.
CC(1/2) = percentage of correlation between intensities from
random half-datasets. Correlation significant at
the 0.1% level is marked by an asterisk.
Karplus & Diederichs (2012), Science 336, 1030-33
Anomal = percentage of correlation between random half-sets
Corr of anomalous intensity differences. Correlation
significant at the 0.1% level is marked.
SigAno = mean anomalous difference in units of its estimated
standard deviation (|F(+)-F(-)|/Sigma). F(+), F(-)
are structure factor estimates obtained from the
merged intensity observations in each parity class.
Nano = Number of unique reflections used to calculate
Anomal_Corr & SigAno. At least two observations
for each (+ and -) parity are required.
SUBSET OF INTENSITY DATA WITH SIGNAL/NOISE >= -3.0 AS FUNCTION OF RESOLUTION
RESOLUTION NUMBER OF REFLECTIONS COMPLETENESS R-FACTOR R-FACTOR COMPARED I/SIGMA R-meas CC(1/2) Anomal SigAno Nano
LIMIT OBSERVED UNIQUE POSSIBLE OF DATA observed expected Corr
20.00 557 66 74 89.2% 2.7% 3.0% 557 58.75 2.9% 100.0* 45 1.674 25
10.00 5018 417 417 100.0% 2.4% 3.1% 5018 75.34 2.6% 100.0* 2 0.812 276
6.00 18352 1583 1584 99.9% 2.8% 3.3% 18351 65.55 2.9% 100.0* 11* 0.914 1248
4.00 59691 4640 4640 100.0% 3.2% 3.5% 59690 64.96 3.4% 100.0* 4 0.857 3987
3.00 112106 8821 8822 100.0% 4.4% 4.4% 112102 50.31 4.6% 99.9* -3 0.844 7906
2.50 147954 11023 11023 100.0% 8.7% 8.6% 147954 29.91 9.1% 99.8* 0 0.829 10096
2.00 332952 24698 24698 100.0% 21.4% 21.6% 332949 14.32 22.3% 99.2* 1 0.804 22992
1.90 106645 8382 8384 100.0% 56.5% 57.1% 106645 5.63 58.8% 94.7* -2 0.767 7886
1.80 138516 10342 10343 100.0% 86.8% 87.0% 138516 3.64 90.2% 87.9* -2 0.762 9741
1.70 175117 12897 12899 100.0% 140.0% 140.1% 175116 2.15 145.4% 69.6* -2 0.732 12188
1.60 209398 16298 16304 100.0% 206.1% 208.5% 209397 1.35 214.6% 48.9* -2 0.693 15466
1.50 273432 20770 20893 99.4% 333.4% 342.1% 273340 0.80 346.9% 23.2* -1 0.644 19495
1.40 33 27 27248 0.1% 42.6% 112.7% 12 0.40 60.3% 88.2 0 0.000 0
1.30 0 0 36205 0.0% -99.9% -99.9% 0 -99.00 -99.9% 0.0 0 0.000 0
1.20 0 0 49238 0.0% -99.9% -99.9% 0 -99.00 -99.9% 0.0 0 0.000 0
1.10 0 0 68746 0.0% -99.9% -99.9% 0 -99.00 -99.9% 0.0 0 0.000 0
1.00 0 0 98884 0.0% -99.9% -99.9% 0 -99.00 -99.9% 0.0 0 0.000 0
0.90 0 0 147505 0.0% -99.9% -99.9% 0 -99.00 -99.9% 0.0 0 0.000 0
0.80 0 0 230396 0.0% -99.9% -99.9% 0 -99.00 -99.9% 0.0 0 0.000 0
total 1579771 119964 778303 15.4% 12.8% 13.1% 1579647 14.33 13.4% 99.9* -1 0.755 111306
========== STATISTICS OF INPUT DATA SET ==========
R-FACTORS FOR INTENSITIES OF DATA SET /dls/sci-scratch/Sam/FC59251/fr6_1/XDS_ASCII.HKL
RESOLUTION R-FACTOR R-FACTOR COMPARED
LIMIT observed expected
20.00 2.7% 3.0% 557
10.00 2.4% 3.1% 5018
6.00 2.8% 3.3% 18351
4.00 3.2% 3.5% 59690
3.00 4.4% 4.4% 112102
2.50 8.7% 8.6% 147954
2.00 21.4% 21.6% 332949
1.90 56.5% 57.1% 106645
1.80 86.8% 87.0% 138516
1.70 140.0% 140.1% 175116
1.60 206.1% 208.5% 209397
1.50 333.4% 342.1% 273340
1.40 42.6% 112.7% 12
1.30 -99.9% -99.9% 0
1.20 -99.9% -99.9% 0
1.10 -99.9% -99.9% 0
1.00 -99.9% -99.9% 0
0.90 -99.9% -99.9% 0
0.80 -99.9% -99.9% 0
total 12.8% 13.1% 1579647
******************************************************************************
WILSON STATISTICS OF SCALED DATA SET: fae-ip.ahkl
******************************************************************************
Data is divided into resolution shells and a straight line
A - 2*B*SS is fitted to log<I>, where
RES = mean resolution (Angstrom) in shell
SS = mean of (sin(THETA)/LAMBDA)**2 in shell
<I> = mean reflection intensity in shell
BO = (A - log<I>)/(2*SS)
# = number of reflections in resolution shell
WILSON LINE (using all data) : A= 14.997 B= 29.252 CORRELATION= 0.99
# RES SS <I> log(<I>) BO
1667 8.445 0.004 2.3084E+06 14.652 49.2
2798 5.260 0.009 1.5365E+06 14.245 41.6
3547 4.106 0.015 2.0110E+06 14.514 16.3
4147 3.480 0.021 1.2910E+06 14.071 22.4
4688 3.073 0.026 7.3586E+05 13.509 28.1
5154 2.781 0.032 4.6124E+05 13.042 30.3
5568 2.560 0.038 3.1507E+05 12.661 30.6
5966 2.384 0.044 2.4858E+05 12.424 29.2
6324 2.240 0.050 1.8968E+05 12.153 28.5
6707 2.119 0.056 1.3930E+05 11.844 28.3
7030 2.016 0.062 9.1378E+04 11.423 29.0
7331 1.926 0.067 5.4413E+04 10.904 30.4
7664 1.848 0.073 3.5484E+04 10.477 30.9
7934 1.778 0.079 2.4332E+04 10.100 31.0
8193 1.716 0.085 1.8373E+04 9.819 30.5
8466 1.660 0.091 1.4992E+04 9.615 29.7
8743 1.609 0.097 1.1894E+04 9.384 29.1
9037 1.562 0.102 9.4284E+03 9.151 28.5
9001 1.520 0.108 8.3217E+03 9.027 27.6
HIGHER ORDER MOMENTS OF WILSON DISTRIBUTION OF CENTRIC DATA
AS COMPARED WITH THEORETICAL VALUES. (EXPECTED: 1.00)
# RES <I**2>/ <I**3>/ <I**4>/
3<I>**2 15<I>**3 105<I>**4
440 8.445 0.740 0.505 0.294
442 5.260 0.762 0.733 0.735
442 4.106 0.888 0.788 0.717
439 3.480 1.339 1.733 2.278
438 3.073 1.168 1.259 1.400
440 2.781 1.215 1.681 2.269
438 2.560 1.192 1.603 2.405
450 2.384 1.117 1.031 0.891
432 2.240 1.214 1.567 2.173
438 2.119 0.972 0.992 0.933
445 2.016 1.029 1.019 0.986
441 1.926 1.603 1.701 1.554
440 1.848 1.544 1.871 2.076
436 1.778 0.927 0.661 0.435
444 1.716 1.134 1.115 1.197
440 1.660 1.271 1.618 2.890
436 1.609 1.424 1.045 0.941
448 1.562 1.794 1.447 1.423
426 1.520 2.517 1.496 2.099
8355 overall 1.253 1.255 1.455
HIGHER ORDER MOMENTS OF WILSON DISTRIBUTION OF ACENTRIC DATA
AS COMPARED WITH THEORETICAL VALUES. (EXPECTED: 1.00)
# RES <I**2>/ <I**3>/ <I**4>/
2<I>**2 6<I>**3 24<I>**4
1227 8.445 1.322 1.803 2.340
2356 5.260 1.167 1.420 1.789
3105 4.106 1.010 1.046 1.100
3708 3.480 1.055 1.262 1.592
4250 3.073 0.999 1.083 1.375
4714 2.781 1.061 1.232 1.591
5130 2.560 1.049 1.178 1.440
5516 2.384 1.025 1.117 1.290
5892 2.240 1.001 1.058 1.230
6269 2.119 1.060 1.140 1.233
6585 2.016 1.109 1.344 1.709
6890 1.926 1.028 1.100 1.222
7224 1.848 1.060 1.150 1.348
7498 1.778 1.143 1.309 1.655
7749 1.716 1.182 1.299 1.549
8026 1.660 1.286 1.376 1.538
8307 1.609 1.419 1.481 1.707
8589 1.562 1.663 1.750 2.119
8575 1.520 2.271 2.172 5.088
111610 overall 1.253 1.354 1.804
======= CUMULATIVE INTENSITY DISTRIBUTION =======
DEFINITIONS:
<I> = mean reflection intensity
Na(Z)exp = expected number of acentric reflections with I <= Z*<I>
Na(Z)obs = observed number of acentric reflections with I <= Z*<I>
Nc(Z)exp = expected number of centric reflections with I <= Z*<I>
Nc(Z)obs = observed number of centric reflections with I <= Z*<I>
Nc(Z)obs/Nc(Z)exp versus resolution and Z (0.1-1.0)
# RES 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0
440 8.445 0.75 0.95 0.98 1.00 0.98 0.99 1.00 1.00 1.02 1.02
442 5.260 1.18 1.11 1.09 1.09 1.07 1.08 1.08 1.08 1.07 1.06
442 4.106 0.97 1.01 0.98 0.97 0.96 0.94 0.92 0.91 0.92 0.94
439 3.480 0.91 0.88 0.91 0.91 0.89 0.90 0.90 0.89 0.89 0.93
438 3.073 0.92 0.92 0.90 0.93 0.94 0.99 1.02 0.99 0.96 0.96
440 2.781 0.98 1.01 1.02 1.05 1.04 1.03 1.04 1.02 1.01 1.01
438 2.560 1.02 1.10 1.05 1.03 1.01 1.03 1.04 1.01 1.04 1.02
450 2.384 0.78 0.93 0.92 0.93 0.89 0.89 0.92 0.95 0.96 0.95
432 2.240 0.69 0.82 0.84 0.86 0.91 0.92 0.93 0.94 0.95 0.95
438 2.119 0.75 0.87 0.95 1.02 1.09 1.09 1.12 1.12 1.10 1.08
445 2.016 0.86 0.86 0.87 0.90 0.91 0.93 0.98 0.99 1.00 1.00
441 1.926 0.88 0.79 0.79 0.81 0.82 0.84 0.85 0.85 0.86 0.86
440 1.848 1.00 0.89 0.85 0.83 0.85 0.85 0.88 0.90 0.90 0.92
436 1.778 1.03 0.87 0.79 0.79 0.80 0.84 0.85 0.87 0.90 0.92
444 1.716 1.09 0.85 0.81 0.78 0.80 0.80 0.81 0.81 0.84 0.85
440 1.660 1.27 1.01 0.93 0.88 0.85 0.84 0.84 0.85 0.88 0.91
436 1.609 1.34 1.00 0.89 0.83 0.80 0.80 0.80 0.81 0.80 0.83
448 1.562 1.39 1.09 0.93 0.86 0.81 0.78 0.77 0.79 0.78 0.78
426 1.520 1.38 1.03 0.88 0.83 0.82 0.80 0.78 0.76 0.75 0.74
8355 overall 1.01 0.95 0.92 0.91 0.91 0.91 0.92 0.92 0.93 0.93
Na(Z)obs/Na(Z)exp versus resolution and Z (0.1-1.0)
# RES 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0
1227 8.445 1.10 1.22 1.21 1.21 1.14 1.10 1.12 1.10 1.11 1.09
2356 5.260 1.15 1.10 1.09 1.03 1.03 1.03 1.01 1.01 1.01 1.00
3105 4.106 0.91 0.96 0.99 1.01 1.02 1.00 1.00 0.99 0.99 1.00
3708 3.480 0.93 0.97 1.00 1.06 1.05 1.04 1.04 1.04 1.04 1.05
4250 3.073 0.94 1.02 1.01 1.00 1.01 1.00 1.00 1.01 1.02 1.02
4714 2.781 1.11 1.04 1.02 1.02 1.02 1.01 1.01 1.01 1.00 1.00
5130 2.560 1.00 1.10 1.06 1.03 1.01 1.02 1.01 1.01 1.01 1.02
5516 2.384 1.09 1.08 1.05 1.04 1.04 1.02 1.01 1.01 1.01 1.01
5892 2.240 0.98 0.99 1.00 1.01 1.01 1.01 1.00 1.00 1.00 1.00
6269 2.119 1.14 1.04 1.02 1.00 1.00 1.00 1.01 1.02 1.02 1.01
6585 2.016 1.17 1.02 1.01 1.02 1.02 1.03 1.02 1.02 1.02 1.02
6890 1.926 1.35 1.07 1.00 0.99 1.00 1.01 1.01 1.00 1.00 1.01
7224 1.848 1.52 1.11 1.01 0.97 0.96 0.98 0.98 0.98 0.98 0.99
7498 1.778 1.80 1.22 1.03 0.97 0.95 0.94 0.95 0.95 0.95 0.96
7749 1.716 2.01 1.28 1.07 0.99 0.94 0.92 0.92 0.92 0.93 0.93
8026 1.660 2.31 1.41 1.13 1.01 0.95 0.92 0.90 0.89 0.89 0.89
8307 1.609 2.62 1.54 1.19 1.04 0.95 0.90 0.88 0.87 0.86 0.87
8589 1.562 2.94 1.69 1.29 1.10 1.00 0.93 0.89 0.86 0.85 0.85
8575 1.520 3.14 1.78 1.34 1.13 1.01 0.93 0.88 0.85 0.83 0.83
111610 overall 1.73 1.24 1.09 1.03 0.99 0.97 0.96 0.96 0.96 0.96
List of 33 reflections *NOT* obeying Wilson distribution (Z> 10.0)
h k l RES Z Intensity Sigma
72 11 61 1.52 17.34 0.2886E+06 0.2367E+05 "alien"
67 53 6 1.50 15.85 0.2638E+06 0.1128E+06 "alien"
35 10 25 3.17 14.39 0.2118E+08 0.2364E+06 "alien"
46 17 99 1.50 14.16 0.2357E+06 0.9588E+05 "alien"
34 32 2 2.75 13.44 0.1239E+08 0.1279E+06 "alien"
79 6 15 1.60 13.10 0.3117E+06 0.2477E+05 "alien"
61 20 33 1.88 12.54 0.8900E+06 0.3054E+05 "alien"
44 4 48 2.30 12.38 0.4695E+07 0.6072E+05 "alien"
66 25 19 1.79 11.89 0.5788E+06 0.2739E+05 "alien"
66 25 11 1.81 11.88 0.5781E+06 0.2771E+05 "alien"
60 43 61 1.50 11.77 0.1959E+06 0.9769E+05 "alien"
72 11 17 1.74 11.64 0.4278E+06 0.2619E+05 "alien"
80 24 26 1.50 11.41 0.1899E+06 0.9793E+05 "alien"
41 21 26 2.59 11.09 0.6988E+07 0.7945E+05 "alien"
44 18 20 2.59 11.08 0.6982E+07 0.7839E+05 "alien"
23 3 62 2.59 11.06 0.6971E+07 0.9154E+05 "alien"
69 7 22 1.80 11.06 0.5383E+06 0.2564E+05 "alien"
73 10 15 1.72 10.98 0.4036E+06 0.2356E+05 "alien"
70 17 35 1.68 10.96 0.3286E+06 0.2415E+05 "alien"
57 24 41 1.88 10.91 0.7746E+06 0.2842E+05 "alien"
82 24 6 1.50 10.74 0.1787E+06 0.1019E+06 "alien"
69 25 62 1.50 10.67 0.1775E+06 0.8689E+05 "alien"
24 20 44 2.91 10.45 0.9641E+07 0.1017E+06 "alien"
66 43 5 1.63 10.37 0.2468E+06 0.2294E+05 "alien"
81 4 29 1.53 10.36 0.1725E+06 0.2364E+05 "alien"
60 40 26 1.72 10.32 0.3792E+06 0.2578E+05 "alien"
39 18 57 2.18 10.24 0.3885E+07 0.5573E+05 "alien"
70 41 15 1.57 10.19 0.1922E+06 0.2281E+05 "alien"
55 36 41 1.79 10.16 0.4942E+06 0.2967E+05 "alien"
37 4 81 1.88 10.15 0.7202E+06 0.3357E+05 "alien"
56 27 5 2.06 10.14 0.1854E+07 0.3569E+05 "alien"
44 39 29 2.06 10.09 0.1844E+07 0.3805E+05 "alien"
65 46 29 1.56 10.06 0.1898E+06 0.2270E+05 "alien"
List of 33 reflections *NOT* obeying Wilson distribution (sorted by resolution)
Ice rings could occur at (Angstrom):
3.897,3.669,3.441, 2.671,2.249,2.072, 1.948,1.918,1.883,1.721
h k l RES Z Intensity Sigma
82 24 6 1.50 10.74 0.1787E+06 0.1019E+06
67 53 6 1.50 15.85 0.2638E+06 0.1128E+06
80 24 26 1.50 11.41 0.1899E+06 0.9793E+05
60 43 61 1.50 11.77 0.1959E+06 0.9769E+05
69 25 62 1.50 10.67 0.1775E+06 0.8689E+05
46 17 99 1.50 14.16 0.2357E+06 0.9588E+05
72 11 61 1.52 17.34 0.2886E+06 0.2367E+05
81 4 29 1.53 10.36 0.1725E+06 0.2364E+05
65 46 29 1.56 10.06 0.1898E+06 0.2270E+05
70 41 15 1.57 10.19 0.1922E+06 0.2281E+05
79 6 15 1.60 13.10 0.3117E+06 0.2477E+05
66 43 5 1.63 10.37 0.2468E+06 0.2294E+05
70 17 35 1.68 10.96 0.3286E+06 0.2415E+05
73 10 15 1.72 10.98 0.4036E+06 0.2356E+05
60 40 26 1.72 10.32 0.3792E+06 0.2578E+05
72 11 17 1.74 11.64 0.4278E+06 0.2619E+05
66 25 19 1.79 11.89 0.5788E+06 0.2739E+05
55 36 41 1.79 10.16 0.4942E+06 0.2967E+05
69 7 22 1.80 11.06 0.5383E+06 0.2564E+05
66 25 11 1.81 11.88 0.5781E+06 0.2771E+05
61 20 33 1.88 12.54 0.8900E+06 0.3054E+05
57 24 41 1.88 10.91 0.7746E+06 0.2842E+05
37 4 81 1.88 10.15 0.7202E+06 0.3357E+05
56 27 5 2.06 10.14 0.1854E+07 0.3569E+05
44 39 29 2.06 10.09 0.1844E+07 0.3805E+05
39 18 57 2.18 10.24 0.3885E+07 0.5573E+05
44 4 48 2.30 12.38 0.4695E+07 0.6072E+05
44 18 20 2.59 11.08 0.6982E+07 0.7839E+05
41 21 26 2.59 11.09 0.6988E+07 0.7945E+05
23 3 62 2.59 11.06 0.6971E+07 0.9154E+05
34 32 2 2.75 13.44 0.1239E+08 0.1279E+06
24 20 44 2.91 10.45 0.9641E+07 0.1017E+06
35 10 25 3.17 14.39 0.2118E+08 0.2364E+06
cpu time used by XSCALE 25.9 sec
elapsed wall-clock time 28.1 sec
I would like to extract the second-to-last line whose 11th column is a number followed by an asterisk (xy.z*), together with the lines directly above and below it, from the table headed SUBSET OF INTENSITY DATA WITH SIGNAL/NOISE >= -3.0 AS FUNCTION OF RESOLUTION.
For example, in this table the line I'm looking for contains "23.2*" in the 11th column (CC(1/2)). I want the second-to-last asterisk line because the last one is the line starting with total, which was a lot easier to extract with a simple grep command.
So the expected output for the code in this case would be to print the lines:
1.60 209398 16298 16304 100.0% 206.1% 208.5% 209397 1.35 214.6% 48.9* -2 0.693 15466
1.50 273432 20770 20893 99.4% 333.4% 342.1% 273340 0.80 346.9% 23.2* -1 0.644 19495
1.40 33 27 27248 0.1% 42.6% 112.7% 12 0.40 60.3% 88.2 0 0.000 0
And so on for all the different possible positions of the asterisk in the table.
In my previous question I received this answer:
sed -n '/LIMIT/,/=/{/^\s*\(\S*\s*\)\{10\}[0-9.-]*\*/H;x;s/^.*\n\(.*\n.*\)$/\1/;x;/=/{x;P;q}}' file
It worked really well (thanks Endoro) for extracting just the second-to-last line with an asterisk in the 11th column, which is what I asked for, but now I need it edited slightly, or a whole new command, to also include the lines above and below.
Here is a link to the previous question: Extracting the second last line from a table using a specific number followed by an asterisk (e.g. xy.z*)
Any help would be greatly appreciated.
Sam
Code for GNU sed
sed -rn '/LIMIT/,/total/{//!H};/total/{x;s/^.*\n(.*\n)((\s+\S+){10}\s+[0-9.]+\*(\s+\S+){3}\n(\s+\S+){14}).*/\1\2/;p;q}' file
$ sed -rn '/LIMIT/,/total/{//!H};/total/{x;s/^.*\n(.*\n)((\s+\S+){10}\s+[0-9.]+\*(\s+\S+){3}\n(\s+\S+){14}).*/\1\2/;p;q}' file
1.60 209398 16298 16304 100.0% 206.1% 208.5% 209397 1.35 214.6% 48.9* -2 0.693 15466
1.50 273432 20770 20893 99.4% 333.4% 342.1% 273340 0.80 346.9% 23.2* -1 0.644 19495
1.40 33 27 27248 0.1% 42.6% 112.7% 12 0.40 60.3% 88.2 0 0.000 0
A bit dirty, but it should work:
awk '
/^ *SUBSET OF INTENSITY/,/^ *total/ {   # only inside the table of interest
    a[++i]=$0;                          # remember each line ...
    b[i]=$11                            # ... and its 11th column
}
END {
    for(o=i-1;o>=0;o--)                 # scan backwards, skipping the total line
        if (b[o]~/\*/) {                # first hit = second-last asterisk line
            print a[o-1]"\n"a[o]"\n"a[o+1]
            break
        }
}' log