Bazel: how to glob headers into one include path - c++

In Buck, one might write:
exported_headers = subdir_glob([
("lib/source", "video/**/*.h"),
("lib/source", "audio/**/*.h"),
],
excludes = [
"lib/source/video/codecs/*.h",
],
prefix = "MediaLib/")
This line would make those headers available under MediaLib/. What would be the equivalent in Bazel?

I ended up writing a rule to do this. It provides something similar to the output of a filegroup, and could be combined with cc_library in a macro.
def _impl_flat_hdr_dir(ctx):
    # Rule implementation: copies every header in `hdrs` into a single
    # flat directory named by `include_path`, so dependents get one
    # include root (the Buck subdir_glob/prefix equivalent).
    path = ctx.attr.include_path
    # Declare the collecting directory itself as an output.
    d = ctx.actions.declare_directory(path)
    # One flattened destination per input header. Only basenames are kept,
    # so two inputs with the same basename would collide — TODO confirm
    # callers never pass duplicate basenames.
    dests = [ctx.actions.declare_file(path + "/" + h.basename)
             for h in ctx.files.hdrs]
    # Shell command: create the directory and copy all headers into it.
    cmd = """
mkdir -p {path};
cp {hdrs} {path}/.
""".format(path=d.path, hdrs=" ".join([h.path for h in ctx.files.hdrs]))
    ctx.actions.run_shell(
        command = cmd,
        inputs = ctx.files.hdrs,
        outputs = dests + [d],
        progress_message = "doing stuff!!!"
    )
    # Legacy struct provider: exposes the copied headers so the target can
    # be consumed like a filegroup (e.g. in a cc_library macro).
    return struct(
        files = depset(dests)
    )
# Rule wrapper for _impl_flat_hdr_dir.
#   hdrs:         header files to flatten into one directory.
#   include_path: name of the directory the headers are copied under.
# output_to_genfiles places the outputs in genfiles, where C++ rules
# expect generated headers.
flat_hdr_dir = rule(
    _impl_flat_hdr_dir,
    attrs = {
        "hdrs": attr.label_list(allow_files = True),
        "include_path": attr.string(mandatory = True),
    },
    output_to_genfiles = True,
)

I have not tested it, but coming from the documentation it should be similar to:
# cc_library equivalent of Buck's subdir_glob + prefix.
cc_library(
    name = "foo",
    # FIX: headers must be listed in `hdrs` (not `srcs`) to be exported to
    # dependents; `include_prefix` applies to the `hdrs` attribute.
    hdrs = glob(
        [
            "video/**/*.h",
            "audio/**/*.h",
        ],
        # FIX: Bazel's glob keyword is `exclude`; `excludes` is Buck's
        # subdir_glob spelling and is rejected by Bazel.
        exclude = ["lib/source/video/codecs/*.h"],
    ),
    # Headers become includable as "MediaLib/...".
    include_prefix = "MediaLib",
)
https://docs.bazel.build/versions/master/be/c-cpp.html#cc_library.include_prefix
https://docs.bazel.build/versions/master/be/functions.html#glob

Related

custom cc_toolchain used in bazel rule

I've been trying to write a bazel rule to wrap compiling for risc-v source files, does some other stuff, etc, but I've been having some trouble with getting a CcToolchainInfo provider.
I have a rule that works that looks like
# Rule producing the C++ toolchain configuration for the RISC-V toolchain.
# `provides` declares (and Bazel enforces) that _impl returns a
# CcToolchainConfigInfo provider.
rv_cc_toolchain_config = rule(
    implementation = _impl,
    attrs = {},
    provides = [CcToolchainConfigInfo],
)
in order to provide config info. I have the following in toolchains/BUILD:
# toolchains/BUILD — wires the custom config rule into a cc_toolchain.
load(":cc_toolchain_config.bzl", "rv_cc_toolchain_config")
package(default_visibility = ['//visibility:public'])
# Produces the CcToolchainConfigInfo consumed by cc_toolchain below.
rv_cc_toolchain_config(name="rv_toolchain_cfg")
cc_toolchain(
    name='rv_toolchain',
    toolchain_identifier='rv-toolchain',
    toolchain_config=':rv_toolchain_cfg',
    # NOTE(review): all *_files point at ':nofile', which is not defined in
    # this BUILD file — presumably a placeholder; the accepted fix below
    # replaces it with an empty filegroup. Verify before reuse.
    all_files=':nofile',
    strip_files=':nofile',
    objcopy_files=':nofile',
    dwp_files=':nofile',
    compiler_files=':nofile',
    linker_files=':nofile',
)
This seems to all work fine; I then have my custom rule to compile with riscv:
# Implementation of the custom RISC-V compile rule. NOTE: the snippet is
# truncated by the asker — it ends at the point of failure and returns
# nothing.
def _compile_impl(ctx):
    deps = []
    # Resolves the C++ toolchain for this rule instance.
    cc_toolchain = find_cpp_toolchain(ctx)
    print(ctx.attr._cc_toolchain)
    # deps is empty here, so this list is always empty — debugging scaffold.
    compilation_contexts = [dep[CcInfo].compilation_context for dep in deps]
    print(type(cc_toolchain))
    feature_configuration = cc_common.configure_features( #fails here
        ctx = ctx,
        cc_toolchain = cc_toolchain,
        requested_features = ctx.features, #currently does nothing
        unsupported_features = ctx.disabled_features,
    )
# Custom rule driving _compile_impl.
rv_compile = rule(
    _compile_impl,
    output_to_genfiles = True,
    attrs = {
        "srcs": attr.label_list(
            doc = "List of source files",
            mandatory = False,
            allow_files = [".cc", ".cpp", ".h", ".c"],
        ),
        "hdrs": attr.label_list(
            doc = "List of header files",
            allow_files = [".h"],
        ),
        # Implicit attribute pointing at the toolchain target.
        "_cc_toolchain": attr.label(
            #default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
            default = Label("//toolchains:rv_toolchain")
        ),
    },
    provides = [
        DefaultInfo,
        CcInfo,
    ],
    toolchains = [
        # FIX: external-repository labels start with "@"; the original
        # "#bazel_tools//..." is a markdown-mangled label and is invalid.
        "@bazel_tools//tools/cpp:toolchain_type",
    ],
    fragments = ["cpp"]
)
Where I fail when trying to configure the toolchain because cc_toolchain is of type ToolchainInfo and not the required CcToolchainInfo. Does anyone have any insight on how to provide CcToolchainInfo within a rule? Or is there a better way of doing this? Documentation seems to go dark on this.
Oops -- figured this out after trolling through github. Turns out the problem is directly referencing cc_toolchain is incorrect, and that CcToolchainInfo is provided via cc_toolchain_suite
updating toolchains/BUILD to look something like
# toolchains/BUILD (working version): the key change is that CcToolchainInfo
# is delivered through a cc_toolchain_suite, not by referencing cc_toolchain
# directly.
load(":cc_toolchain_config.bzl", "rv_cc_toolchain_config")
package(default_visibility = ['//visibility:public'])
rv_cc_toolchain_config(name="rv_toolchain_cfg")
# Empty filegroup used for all the tool-file attributes below.
filegroup(name = 'empty')
cc_toolchain(
    name='rv_toolchain',
    toolchain_identifier='sanity-toolchain',
    toolchain_config=':rv_toolchain_cfg',
    all_files=':empty',
    strip_files=':empty',
    objcopy_files=':empty',
    dwp_files=':empty',
    compiler_files=':empty',
    linker_files=':empty',
)
# The suite maps a CPU name to the concrete toolchain; rules that depend on
# the suite receive CcToolchainInfo.
cc_toolchain_suite(
    name='rv',
    toolchains={
        'darwin': ':rv_toolchain', #use whatever OS you need here...
    }
)
and the rv compile rule to something like
# Working version of the rule: _cc_toolchain now points at the
# cc_toolchain_suite (//toolchains:rv) instead of the bare cc_toolchain.
rv_compile = rule(
    _compile_impl,
    output_to_genfiles = True,
    attrs = {
        "srcs": attr.label_list(
            doc = "List of source files",
            mandatory = False,
            allow_files = [".cc", ".cpp", ".h", ".c"],
        ),
        "hdrs": attr.label_list(
            doc = "List of header files",
            allow_files = [".h"],
        ),
        "_cc_toolchain": attr.label(
            #default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
            default = Label("//toolchains:rv")
        ),
    },
    provides = [
        DefaultInfo,
        CcInfo,
    ],
    toolchains = [
        # FIX: external-repository labels start with "@"; the original
        # "#bazel_tools//..." is a markdown-mangled label and is invalid.
        "@bazel_tools//tools/cpp:toolchain_type",
    ],
    fragments = ["cpp"]
)
works like a charm :) Anyone reading this should also enable the experimental Starlark C++ APIs. If anyone knows how to make cc_toolchain_suite CPU-agnostic, I'd love to hear it. Cheers.

Generated header not found

I'm trying to use Bazel to build a cpp project that use Flatbuffers.
But my map_schema_generated.h generated with flatc is not found.
My tree:
|
|_ data
| |_ maps
| |_ BUILD
| |_ map_schema.fbs
|
|_ src
| |_ map
| |_ BUILD
| |_ map.hpp
| |_ map.cpp
|
|_ tools
| |_ BUILD
| |_ generate_fbs.bzl
|
|_ WORKSPACE
tools/generate_fbs.bzl:
# tools/generate_fbs.bzl — runs flatc to generate a C++ header from a
# FlatBuffers schema.
def _impl(ctx):
    output = ctx.outputs.out
    input = ctx.files.srcs
    print("generating", output.basename)
    # NOTE(review): ctx.action is the legacy API; newer Bazel uses
    # ctx.actions.run_shell (as in the first snippet in this file) — confirm
    # against the Bazel version in use.
    ctx.action(
        use_default_shell_env = True,
        outputs = [output],
        inputs = input,
        progress_message="Generating %s with %s" % (output.path, input[0].path),
        # Only the first src is passed to flatc; extra srcs are ignored.
        command="flatc -o %s --cpp %s" % (output.dirname, input[0].path)
    )
generate_fbs = rule(
    implementation=_impl,
    # Generated header lands in genfiles, where cc rules look for it.
    output_to_genfiles = True,
    attrs={
        "srcs": attr.label_list(allow_files=True, allow_empty=False),
        "out": attr.output()
    },
)
data/maps/BUILD:
# data/maps/BUILD — generates map_schema_generated.h from the .fbs schema.
load("//tools:generate_fbs.bzl", "generate_fbs")
generate_fbs(
    name = "schema",
    srcs = ["map_schema.fbs"],
    out = "map_schema_generated.h",
    visibility = ["//visibility:public"]
)
src/map/BUILD:
# src/map/BUILD (the asker's failing version). Listing the generated header
# in `srcs` does not put its directory on the include path, which is why
# the #include fails — see the answer below.
cc_library(
    name = "map",
    srcs = [
        "//data/maps:map_schema_generated.h",
        "map.hpp",
        "map.cpp"
    ]
)
src/map/map.cpp has #include "map_schema_generated.h".
The command line I use to build: bazel build //src/map.
If I find in bazel-*, I got:
bazel-genfiles/data/maps/map_schema_generated.h
bazel-out/k8-fastbuild/genfiles/data/maps/map_schema_generated.h
bazel-my-workspace-name/bazel-out/k8-fastbuild/genfiles/data/maps/map_schema_generated.h
And if I cat these files, I can see that they are well generated.
All the information that I found are about Tensorflow, and are not really helpful.
Best regards,
The problem is that your cc_library actually doesn't really recognize your generated header as requiring any special action (like adding a -I flag for the location it's in). It gets generated and lives in the build tree, but not anywhere the compiler (preprocessor) would look for it while working on map.cpp. (Run the build with -s for a bit more insight into what happened and how.)
Now about how to address this, there might be a better way, but this would appear to work. I guess this functionality could also be rolled into generate_fbs rule.
In data/maps/BUILD I've added "header only" library as follows:
# Header-only wrapper library: exporting the generated header through
# `hdrs` (with an include_prefix) makes the compiler's -I flags include its
# location for dependents.
cc_library(
    name = "map_schema_hdr",
    hdrs = [":map_schema_generated.h"],
    include_prefix = ".", # to manipulate -I of dependenices
    visibility = ["//visibility:public"]
)
In src/map/BUILD I would then use this header only library as dependency of map:
# src/map/BUILD — depends on the header-only wrapper instead of listing the
# generated header in srcs.
cc_library(
    name = "map",
    srcs = [
        # FIX: the original omitted the comma after "map.cpp"; adjacent
        # string literals concatenate in Starlark, yielding the single
        # (nonexistent) file "map.cppmap.hpp".
        "map.cpp",
        "map.hpp",
    ],
    deps = [
        "//data/maps:map_schema_hdr",
    ]
)
To play a bit more with the idea of having a single rule (macro) for convenience, I've made the following changes:
tools/generate_fbs.bzl now reads:
# Macro version: the rule generates the header, and a macro wraps it with a
# header-only cc_library so consumers just add one dep.
def _impl(ctx):
    output = ctx.outputs.out
    input = ctx.files.srcs
    print("generating", output.basename)
    ctx.action(
        use_default_shell_env = True,
        outputs = [output],
        inputs = input,
        progress_message="Generating %s with %s" % (output.path, input[0].path),
        # cp stands in for flatc here, purely to demonstrate the wiring.
        command="/bin/cp %s %s" % (input[0].path, output.path)
    )
# Private rule; consumers use the generate_fbs macro below.
_generate_fbs = rule(
    implementation=_impl,
    output_to_genfiles = True,
    attrs={
        "srcs": attr.label_list(allow_files=True, allow_empty=False),
        "out": attr.output()
    },
)
def generate_fbs(name, srcs, out):
    # Hidden generation target ("_<name>") plus a public header-only
    # library named <name> that exports the generated header.
    _generate_fbs(
        name = "_%s" % name,
        srcs = srcs,
        out = out
    )
    native.cc_library(
        name = name,
        hdrs = [out],
        include_prefix = ".",
        visibility = ["//visibility:public"],
    )
With that, I could have data/maps/BUILD:
# data/maps/BUILD using the macro; "schema" is now a cc_library dep.
load("//tools:generate_fbs.bzl", "generate_fbs")
generate_fbs(
    name = "schema",
    srcs = ["map_schema.fbs"],
    out = "map_schema_generated.h",
)
And src/map/BUILD contains:
# src/map/BUILD consuming the macro-generated schema library.
cc_library(
    name = "map",
    srcs = [
        "map.cpp",
        "map.hpp",
    ],
    deps = [
        "//data/maps:schema",
    ]
)
And bazel build //src/map builds bazel-bin/src/map/libmap.a and bazel-bin/src/map/libmap.so.
Instead of #include "map_schema_generated.h" in src/map/map.cpp, I could have written #include "data/maps/map_schema_generated.h".
I think that is the cleanest way to make it work.

Including Protobufs with Bazel

I have the following BUILD file:
# FIX throughout: external-repo labels start with "@"; the "#..." forms are
# markdown-mangled and invalid in a BUILD file.
load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library")
### Protos ###
cc_proto_library(
    name = "homework_cc_proto",
    protoc = "@com_google_protobuf//:protoc",
    default_runtime = "@com_google_protobuf//:protobuf",
    # NOTE: this target has no deps on :homework_proto — that is the bug
    # the question is about; the accepted answer adds the deps.
)
proto_library(
    name = "homework_proto",
    srcs = [
        "protos/complexity.proto",
        "protos/example.proto",
        "protos/problem.proto",
        "protos/solution.proto",
    ],
)
### End Protos ###
### Binaries ###
cc_binary(
    name = "main",
    srcs = ["main.cc"],
    deps = [":homework_cc_proto"],
)
and main.cc:
#include <iostream>
// Generated header from the cc_proto_library above; including it is what
// triggers the "cannot be found" build error being asked about.
#include "example.pb.h"
int main() {
    std::cout << "Hello!" << std::endl;
}
If I invoke bazel build :homework_cc_proto, the build is successful. However, when I run bazel build :main I get an error saying that example.pb.h cannot be found. How can I import my built protobufs?
Your cc_proto_library needs to depend on homework_proto.
# Fixed target: cc_proto_library must depend on the proto_library so the
# .pb.h headers are generated and propagated.
cc_proto_library(
    name = "homework_cc_proto",
    # FIX: "@" restored in the external-repo labels (mangled to "#").
    protoc = "@com_google_protobuf//:protoc",
    default_runtime = "@com_google_protobuf//:protobuf",
    deps = [ ":homework_proto" ],
)

Create vTiger Sales Order fails due to MANDATORY_FIELDS_MISSING "message":"Mandatory Fields Missing

I'm trying to create a SalesOrder via WebServices but it always fails due to missing mandatory fields.
I'm sending the following fields.
The error message does not specify the missing fields
I'm using vTiger 6.0.0
How can I figure it out
// Fields sent to the vTiger webservice when creating the SalesOrder.
salesOrder.subject = fullDescription
salesOrder.sostatus = "delivered"
salesOrder.account_id ='11x28'
salesOrder.bill_street = shipping.address.street
salesOrder.bill_city = shipping.address.city
salesOrder.bill_state = shipping.address.state
salesOrder.bill_code = shipping.address.postalCode
// BUG FIX: the country fields were assigned the postal code.
salesOrder.bill_country = shipping.address.country
salesOrder.ship_street = shipping.address.street
salesOrder.ship_city = shipping.address.city
salesOrder.ship_state = shipping.address.state
salesOrder.ship_code = shipping.address.postalCode
// BUG FIX: same copy/paste error on the shipping side.
salesOrder.ship_country = shipping.address.country
salesOrder.invoicestatus = "Created"
salesOrder.productid = selectedServices[0].id
salesOrder.quantity = 1
salesOrder.listprice = selectedServices[0].unit_price
//
salesOrder.comment= ""
salesOrder.tax1 = ""
salesOrder.tax2 = "10.000"
salesOrder.tax3 = "6.000"
salesOrder.pre_tax_total = "876.00"
salesOrder.currency_id = "21x1"
salesOrder.conversion_rate = "1.000"
salesOrder.tax4 = ""
salesOrder.duedate = "2014-12-12"
salesOrder.carrier = "FedEx"
salesOrder.pending = ""
salesOrder.txtAdjustment = "-21.00000000"
salesOrder.salescommission = "5.000"
salesOrder.exciseduty = "0.000"
salesOrder.hdnGrandTotal = "995.16000000"
salesOrder.hdnSubTotal = "876.00000000"
salesOrder.hdnTaxType = "group"
salesOrder.hdndiscountamount = "0"
salesOrder.hdnS_H_Percent = "21"
salesOrder.discount_percent = "25.000"
salesOrder.discount_amount = ""
salesOrder.terms_conditions = "- Unless "
salesOrder.enable_recurring = "0"
I was missing the LineItems. Please notice that it has to be CamelCase
"LineItems": [
{
"productid": "25x142",
"listprice": "299.00000000",
"quantity": "1"
}
],
you missed the mandatory field "Assigned TO"

Is there a way to filter a text file using grep (or any other tool), so that you can get a section of the file that's encased in bracers or brackets?

I got several files that look something like this:
universe = {
["stars"] = {
["Sun"] = {
["planets"] = "9",
["life"] = "Yes",
["asteroid"] = "9001"
},
["Alpha Centauri"] = {
["planets"] = "3",
["life"] = "No",
["asteroid"] = "20"
},
["Rigel"] = {
["planets"] = "5",
["life"] = "No",
["asteroid"] = "11"
}
}
}
My intention is to find, for instance, every block where ["life"] equals "No". I realize this could be handled better if it was within a database (or something with a structure), but I'm not sure how to convert this data onto that.
I have a bunch of files in this format, and I'd like to run a command that could display the sections (up to the immediate parent bracket) where the condition is true, so for the previous example, I'd like to get:
["Alpha Centauri"] = {
["planets"] = "3",
["life"] = "No",
["asteroid"] = "20"
},
["Rigel"] = {
["planets"] = "5",
["life"] = "No",
["asteroid"] = "11"
}
Can this be done with GREP? Or is there any other tool that could do something like this?
Any help is greatly appreciated. Thanks in advance.
EDIT
Example 2: https://regex101.com/r/jO9dU5/1
Try this Lua program:
-- Walk `tree` recursively and print the dotted key-path of every entry
-- whose value equals `target`.
local function find(target, tree, path)
  for key, value in pairs(tree) do
    local here = path .. "." .. key
    if value == target then
      print(here)
    elseif type(value) == "table" then
      find(target, value, here)
    end
  end
end
-- Report every key in `universe` whose value is "No".
find("No", universe, "universe")
Add the definition of universe before this code.
If you really want to do text processing, try this instead:
-- Text-processing alternative: the whole file content goes into S
-- (the "..." is the asker's data, elided here).
S=[[
universe = {
...
}
]]
-- %b[] matches a balanced ["..."] key; the rest grabs a brace block whose
-- body contains "No" before any nested '{'.
for w in S:gmatch('%b[] = {[^{]-"No".-},?') do
    print(w)
end
Yep, it's possible through grep which supports -P (Perl Regex) parameter.
$ grep -oPz '.*\[[^\[\]]*\]\s*=\s*\{[^{}]*\["life"\]\s*=\s*"No"[^{}]*}.*' file
["Alpha Centauri"] = {
["planets"] = "3",
["life"] = "No",
["asteroid"] = "20"
},
["Rigel"] = {
["planets"] = "5",
["life"] = "No",
["asteroid"] = "11"
}
DEMO
From grep --help
-z, --null-data a data line ends in 0 byte, not newline
-o, --only-matching show only the part of a line matching PATTERN
Update:
\[[^\n]*\]\h*=\h*\{(?!,\s*\[[^\[\]]*\]\h*=\h*{).*?\["fontSize"\]\h*=\h*20,.*?\}(?=,\s*\[[^\[\]]*\]\h*=\h*{|\s*})
DEMO
$ pcregrep -oM '(?s)[^\n]*\[[^\n]*\]\h*=\h*\{(?!,\s*\[[^\[\]]*\]\h*=\h*{).*?\["fontSize"\]\h*=\h*20,.*?\}(?=,\s*\[[^\[\]]*\]\h*=\h*{|\s*})' file
["frame 1"] = {
["fontSize"] = 20,
["displayStacks"] = "%p",
["xOffset"] = 251.000518798828,
["stacksPoint"] = "BOTTOM",
["regionType"] = "icon",
["yOffset"] = 416.000183105469,
["anchorPoint"] = "CENTER",
["parent"] = "Target Shit",
["numTriggers"] = 1,
["customTextUpdate"] = "update",
["id"] = "Invulnerabilities 2",
["icon"] = true,
["fontFlags"] = "OUTLINE",
["stacksContainment"] = "OUTSIDE",
["zoom"] = 0,
["auto"] = true,
["selfPoint"] = "CENTER",
["width"] = 60,
["frameStrata"] = 1,
["desaturate"] = false,
["stickyDuration"] = true,
["font"] = "Emblem",
["inverse"] = false,
["height"] = 60,
}
["frame 2"] = {
["fontSize"] = 20,
["displayStacks"] = "%p",
["parent"] = "Target Shit",
["xOffset"] = 118.000427246094,
["stacksPoint"] = "BOTTOM",
["anchorPoint"] = "CENTER",
["untrigger"] = {
},
["regionType"] = "icon",
["color"] = {
1, -- [1]
1, -- [2]
1, -- [3]
1, -- [4]
},
["desaturate"] = false,
["frameStrata"] = 1,
["stickyDuration"] = true,
["width"] = 60,
["font"] = "Emblem",
["inverse"] = false,
["icon"] = true,
["height"] = 60,
["yOffset"] = 241
}
(?s) DOTALL modifier which makes dots in your regex to match even line breaks.
Using a proper lua parser in perl
This is not a quick'n'dirty snippet, but a robust way to query a lua's DS :
use strict; use warnings;
use Data::Lua; # lua 2 perl parser
use Data::Dumper; # to dump Data Structures (in color)
# retrieving the lua'DS in a perl's DS
my $root = Data::Lua->parse_file('lua.conf');
# iterating over keys of planet's HASH
# FIX: `keys` on a bare hash reference was an experimental feature removed
# in Perl 5.24; dereference explicitly with %{ ... } instead.
foreach my $planet (keys %{ $root->{universe}->{stars} }) {
    print Dumper { $planet => $root->{universe}->{stars}->{$planet} }
        if $root->{universe}->{stars}->{$planet}->{life} eq "No";
}
Output
$VAR1 = {
'Rigel' => {
'planets' => '5',
'life' => 'No',
'asteroid' => '11'
}
};
$VAR1 = {
'Alpha Centauri' => {
'asteroid' => '20',
'life' => 'No',
'planets' => '3'
}
};
How To
install Data::Lua if not already installed with # cpan Data::Lua
put the Data Structure in the file lua.conf
put this script in the same dir in by example lua_DS_parser.pl
run the script with $ perl lua_DS_parser.pl
enjoy ;)
You could use something like this
grep -C 2 -E 'life.+= "No"' path_to_file
But in my opinion better way is converting files to some common format.