I hope you are doing well. I have trained my forecasting model via ML.NET in VS 2022, and it predicts the daily income and the number of patients very well. My problem is with retraining the model: to be honest, I cannot work out suitable parameters for the ForecastSsa algorithm.
Here is the code:
// Forecast the next 5 days of income and patient counts from the SSA model.
// NOTE(review): Predict(null, horizon: 31) yields 31 forecast steps indexed by
// horizon offset (0 = first forecast day). The original indexed the forecast
// arrays with the Persian day-of-month (1..31), which is semantically wrong
// and can reach index 31 — out of range for a 31-element array. Confirm the
// intended indexing against the model's output schema.
string[] PersianDates = new string[5];
int[] PatientCounts = new int[5];
ModelOutput ML_Out = PatientPrediction_Consumption.Predict(null, horizon: 31);
float[] Number_TotalPrices = new float[5];
DateTime DateTimeNow = DateTime.Now;
// The calendar is stateless here; create it once instead of per iteration.
PersianCalendar CDP = new PersianCalendar();
for (int IndexDay = 0; IndexDay <= 4; IndexDay++)
{
    DateTimeNow = DateTimeNow.AddDays(1);
    // TODO(review): this subtracts the point forecast from its upper bound,
    // which measures forecast uncertainty rather than income — confirm intent.
    Number_TotalPrices[IndexDay] = ML_Out.TotalPrice_UB[IndexDay]
        - ML_Out.TotalPrice[IndexDay];
    PersianDates[IndexDay] = FarsiCalendar(DateTimeNow, false);
    // BUG FIX: cast AFTER dividing. The original "(int)x / 120000" truncated
    // the float first and then integer-divided, discarding fractional revenue.
    PatientCounts[IndexDay] = (int)(Number_TotalPrices[IndexDay] / 120000f);
    PatientPrediction_Predicted =
        PatientPrediction_Predicted + "\n" + "تاریخ:" + PersianDates[IndexDay] +
        "- در آمد:" + Number_TotalPrices[IndexDay] +
        " - تعداد مراجعین:" + PatientCounts[IndexDay] + "<br/>"; // fixed malformed "<br/f>" tag
}
Related
I am new to AWS and am trying to use Rekognition to identify certain people in a crowd. I am currently trying to index the images of the separate individuals but have hit a snag when trying to create a collection. There seems to be a data type compatibility issue when I try using Amazon.Rekognition.Model.S3Object(). I have provided the code below. Does anyone have a solution or a better method? Thank you for your time!
/// <summary>
/// Indexes every .jpg object in the "wem0020" bucket into the Rekognition
/// collection "TestFaces". Per-object failures are logged and skipped.
/// </summary>
private static void TryIndexFaces()
{
    S3Client = new AmazonS3Client();
    RekognitionClient = new AmazonRekognitionClient();

    ListObjectsV2Request req = new ListObjectsV2Request();
    req.BucketName = "wem0020";
    ListObjectsV2Response listObjectsResponse = S3Client.ListObjectsV2(req);

    CreateCollectionRequest ccr = new CreateCollectionRequest();
    ccr.CollectionId = "TestFaces";
    //RekognitionClient.CreateCollection(ccr);

    // BUG FIX: the original took lvr.Versions[0].VersionId and applied it to
    // EVERY image, pointing Rekognition at the wrong object data for all but
    // (at most) one key. List the versions once, then match per key below.
    ListVersionsResponse lvr = S3Client.ListVersions(req.BucketName);

    foreach (Amazon.S3.Model.S3Object s3o in listObjectsResponse.S3Objects)
    {
        Console.WriteLine(s3o.Key);
        try
        {
            // Ordinal, case-insensitive so ".JPG" is matched as well.
            if (!s3o.Key.EndsWith(".jpg", StringComparison.OrdinalIgnoreCase))
                continue;

            // Latest version of THIS object; null means Rekognition reads the
            // current version, which is also correct.
            string version = null;
            foreach (var v in lvr.Versions)
            {
                if (v.Key == s3o.Key && v.IsLatest)
                {
                    version = v.VersionId;
                    break;
                }
            }
            Console.WriteLine(version);

            Amazon.Rekognition.Model.S3Object reks3o = new Amazon.Rekognition.Model.S3Object();
            reks3o.Bucket = req.BucketName;
            reks3o.Name = s3o.Key;
            reks3o.Version = version;

            // Fresh request/image per object so no state leaks between calls.
            Amazon.Rekognition.Model.Image img = new Amazon.Rekognition.Model.Image();
            img.S3Object = reks3o;

            IndexFacesRequest indexRequest = new IndexFacesRequest();
            indexRequest.Image = img;
            indexRequest.CollectionId = ccr.CollectionId;
            RekognitionClient.IndexFaces(indexRequest);
        }
        catch (Exception ex)
        {
            // Keep going on a per-object failure, but surface the reason.
            Console.WriteLine(ex.Message);
        }
    }
}
To index the faces, use the bounding-box values returned by AWS Rekognition. I have done this with Python:
widtho = 717   # width of the given image in pixels
heighto = 562  # height of the given image in pixels
facecount = 1  # 1-based running counter of detected faces

s3 = boto3.resource('s3')
bucket = s3.Bucket('rek')

if __name__ == "__main__":
    # Choosing the file in the S3 bucket.
    photo = 'sl.jpg'
    bucket = 'rek'

    # Initialisation of Rekognition and performing detect_faces.
    client = boto3.client('rekognition', region_name='eu-west-1')
    response = client.detect_faces(
        Image={'S3Object': {'Bucket': bucket, 'Name': photo}}, Attributes=['ALL'])

    print('Detected faces for ' + photo)
    print('The faces are detected and labled from left to right')
    for faceDetail in response['FaceDetails']:
        # BUG FIX: the original printed an undefined name `i`; use the running
        # face counter instead, and advance it once per detected face.
        print('Face Detected= ', facecount)

        # The bounding box is returned as ratios of the image dimensions.
        print('Bounding Box')
        bboxlen = len(faceDetail['BoundingBox'])
        print(bboxlen)
        width = faceDetail['BoundingBox'].get('Width')
        height = faceDetail['BoundingBox'].get('Height')
        left = faceDetail['BoundingBox'].get('Left')
        top = faceDetail['BoundingBox'].get('Top')

        # Convert the ratio box to absolute pixel coordinates.
        w = int(width * widtho)
        h = int(height * heighto)
        x = int(left * widtho)
        y = int(top * heighto)

        # NOTE(review): `imagere` must be the image loaded earlier (e.g. via
        # cv2.imread) — it is not defined in this snippet; confirm upstream.
        cv2.rectangle(imagere, (x, y), (x + w, y + h), (255, 0, 0), 2)

        facecount += 1
This loop indexes the faces one by one within a single frame.
Can we add a shape/image to chart data labels in Aspose? I need to show an arrow, colored differently according to certain values, with each data label in the chart. I am using Aspose to generate my PPT. Alternatively, is there any way to find the data label positions in Aspose?
I have observed your requirements and would like to share that MS PowerPoint supports the LineWithMarker chart, which can display different predefined or custom marker symbols (in the form of images) for different series data points. Please try the following sample code for possible options using Aspose.Slides and MSO charts.
/// <summary>
/// Creates a line-with-markers chart whose series marker is picture-filled,
/// overrides the first data point with a solid red triangle marker, shows
/// value labels on the first four points, and saves the presentation.
/// </summary>
public static void TestScatter()
{
    var location = System.Reflection.Assembly.GetExecutingAssembly().Location;

    //Open a presentation
    Presentation pres = new Presentation();
    IChart chart = pres.Slides[0].Shapes.AddChart(ChartType.StackedLineWithMarkers, 10, 10, 400, 400);

    // Keep only the first (default) series.
    var serie = chart.ChartData.Series[0];
    chart.ChartData.Series.RemoveAt(1);
    chart.ChartData.Series.RemoveAt(1);

    serie.Marker.Format.Fill.FillType = FillType.Picture;
    serie.Marker.Size = 20;

    // Set the marker picture. BUG FIX: the original used the invalid token
    // #"..." — C# verbatim string literals use the @ prefix. Also dispose the
    // GDI+ bitmap once Aspose has copied it into the presentation.
    using (System.Drawing.Image img = new Bitmap(@"C:\Users\Public\Pictures\Sample Pictures\Tulips.jpg"))
    {
        IPPImage imgx = pres.Images.AddImage(img);
        serie.Marker.Format.Fill.PictureFillFormat.Picture.Image = imgx;
    }

    //For individual data point
    serie.DataPoints[0].Marker.Format.Fill.FillType = FillType.Solid;
    serie.DataPoints[0].Marker.Format.Fill.SolidFillColor.Color = Color.Red;
    serie.DataPoints[0].Marker.Size = 20;
    serie.DataPoints[0].Marker.Symbol = MarkerStyleType.Triangle;

    // Show the value label on the first four data points.
    serie.DataPoints[0].Label.DataLabelFormat.ShowValue = true;
    serie.DataPoints[1].Label.DataLabelFormat.ShowValue = true;
    serie.DataPoints[2].Label.DataLabelFormat.ShowValue = true;
    serie.DataPoints[3].Label.DataLabelFormat.ShowValue = true;

    pres.Save(Path.Combine(Path.GetDirectoryName(location), "Result2.pptx"), SaveFormat.Pptx);
}
I am working as Support developer/ Evangelist at Aspose.
Many Thanks,
I have further observed your requirements and have observed that you have also posted the similar requirements in Aspose.Slides official support forum as well. Please try using following sample code on your end to serve the purpose.
/// <summary>
/// Builds a styled line chart and overlays a colored triangle next to every
/// data label: green pointing up for even points, red rotated 180° for odd.
/// </summary>
public static void TestLineChart()
{
    var location = System.Reflection.Assembly.GetExecutingAssembly().Location;

    //Open a presentation
    Presentation pres = new Presentation();
    IChart chart = pres.Slides[0].Shapes.AddChart(ChartType.StackedLineWithMarkers, 10, 10, 400, 400);

    // Keep only the first (default) series.
    var serie = chart.ChartData.Series[0];
    chart.ChartData.Series.RemoveAt(1);
    chart.ChartData.Series.RemoveAt(1);

    // Series marker: solid orange diamond with a red outline. (The original
    // set FillType.Picture first and immediately overwrote it with Solid.)
    serie.Marker.Size = 20;
    serie.Marker.Symbol = MarkerStyleType.Diamond;
    serie.Marker.Format.Fill.FillType = FillType.Solid;
    serie.Marker.Format.Fill.SolidFillColor.Color = Color.Orange;
    serie.Marker.Format.Line.FillFormat.FillType = FillType.Solid;
    serie.Marker.Format.Line.FillFormat.SolidFillColor.Color = Color.Red;
    serie.Marker.Format.Line.Width = 1.0F;

    // Series line style.
    serie.Format.Line.Width = 3.0f;
    serie.Format.Line.FillFormat.FillType = FillType.Solid;
    serie.Format.Line.FillFormat.SolidFillColor.Color = Color.FromArgb(209, 225, 91);

    // BUG FIX: first make every label visible, THEN validate the layout once,
    // so ActualX/ActualY reflect the final chart. The original called
    // ValidateChartLayout() inside the loop, placing earlier triangles
    // against stale intermediate layouts (and re-laying-out N times).
    for (int i = 0; i < serie.DataPoints.Count; i++)
    {
        serie.DataPoints[i].Label.DataLabelFormat.ShowValue = true;
    }
    chart.ValidateChartLayout();

    for (int i = 0; i < serie.DataPoints.Count; i++)
    {
        IDataLabel label = serie.Labels[i];
        IAutoShape ashp = chart.UserShapes.Shapes.AddAutoShape(ShapeType.Triangle,
            chart.X + label.ActualX + 5, chart.Y + label.ActualY + 5, 20, 20);
        ashp.FillFormat.FillType = FillType.Solid;
        ashp.LineFormat.FillFormat.FillType = FillType.NoFill;
        if (i % 2 == 0) //even data points: green, pointing up
        {
            ashp.FillFormat.SolidFillColor.Color = Color.Green;
        }
        else            //odd data points: red, rotated to point down
        {
            ashp.Rotation = 180;
            ashp.FillFormat.SolidFillColor.Color = Color.Red;
        }
    }

    pres.Save(Path.Combine(Path.GetDirectoryName(location), "Result2.pptx"), Aspose.Slides.Export.SaveFormat.Pptx);
}
I am working as Support developer/ Evangelist at Aspose.
Many Thanks,
I currently have a OpenGL sprite drawing class that buffers up a bunch of sprite data then dumps it with glDrawElements. The problem is, creating the sprites that go into the buffer is cumbersome as I have loads of parameters to pass into the buffer with even more redundancy for the shaders. I was wondering if I could reduce CPU load by only loading the buffer with the essentials, location, orientation, texture coordinates etc... and then let a geometry shader turn that nonsense into quads for the fragment shader.
If there's a different answer, I've added the offending method so you can see what I mean:
// Append one sprite quad (4 vertices + 4 indices) for sprite `num` to the
// per-atlas sync buffer, growing that buffer on demand.
//
//   num   - sprite key used to look up the sprite's atlas/texture properties
//   loc   - destination bounding box
//   angle - rotation, stored per-vertex for the shader
void Machine::draw(key num, BoundingBox loc, float angle){
    SpriteSyncData* props;
    VertexAttribArray* vdata;
    GLushort* idata;
    SpriteProperties* sprite_props;
    int sliceW;
    int sliceH;
    sprite_props = &spriteList[num];
    props = &spriteToSync[sprite_props->atlas];
    props->size++;
    if(props->size > props->capacity){
        // BUG FIX: the original assigned realloc()'s result directly back to
        // props->data / props->i_data; on failure realloc returns NULL, which
        // leaks the old block and is then dereferenced below. Grow through
        // temporaries and roll back cleanly if either allocation fails.
        size_t newCapacity = props->capacity + COARSE_MEM_SCALE;
        VertexAttribArray* newData = (VertexAttribArray*)
            realloc((void*) props->data, (sizeof(VertexAttribArray)*4) * newCapacity);
        if(newData)
            props->data = newData;
        GLushort* newIdata = (GLushort*)
            realloc((void*) props->i_data, (sizeof(GLushort)*4) * newCapacity);
        if(newIdata)
            props->i_data = newIdata;
        if(!newData || !newIdata){
            props->size--; // drop this sprite instead of corrupting the buffer
            return;
        }
        props->capacity = newCapacity;
    }
    vdata = props->data + (props->size - 1) * 4;
    idata = props->i_data + (props->size - 1) * 4;
    // Dimensions of the sprite's slice in the texture atlas.
    sliceW = sprite_props->location.x1 - sprite_props->location.x0;
    sliceH = sprite_props->location.y1 - sprite_props->location.y0;
    if(sprite_props->flags & DRAW_TILED){
        // Tiled: the quad spans the whole bounding box; `s` carries how many
        // times the slice repeats across it, `r` stays zero.
        vdata[0].p = QVector3D(loc.x1, loc.y0, UNIFORM_DEPTH);
        vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
        vdata[2].p = QVector3D(loc.x0, loc.y1, UNIFORM_DEPTH);
        vdata[3].p = QVector3D(loc.x1, loc.y1, UNIFORM_DEPTH);
        vdata[0].s = QVector2D(((float) (loc.x1 - loc.x0)) / sliceW,
                               ((float) (loc.y1 - loc.y0)) / sliceH);
        vdata[0].r = QVector2D(0, 0);
        vdata[1].r = vdata[0].r;
        vdata[2].r = vdata[0].r;
        vdata[3].r = vdata[0].r;
    }
    else{
        // Untiled: the quad is sized to the slice itself, anchored at
        // (loc.x0, loc.y0); `r` records the slice dimensions.
        vdata[0].p = QVector3D(loc.x0 + sliceW, loc.y0, UNIFORM_DEPTH);
        vdata[1].p = QVector3D(loc.x0, loc.y0, UNIFORM_DEPTH);
        vdata[2].p = QVector3D(loc.x0, loc.y0 + sliceH, UNIFORM_DEPTH);
        vdata[3].p = QVector3D(loc.x0 + sliceW, loc.y0 + sliceH, UNIFORM_DEPTH);
        vdata[0].s = QVector2D(1, 1);
        vdata[0].r = QVector2D(sliceW, sliceH);
        vdata[1].r = vdata[0].r;
        vdata[2].r = vdata[0].r;
        vdata[3].r = vdata[0].r;
    }
    // Per-vertex texture coordinates for the slice corners.
    vdata[0].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[1]);
    vdata[1].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
    vdata[2].t = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[3]);
    vdata[3].t = QVector2D(sprite_props->texCoords[2], sprite_props->texCoords[3]);
    vdata[1].s = vdata[0].s;
    vdata[2].s = vdata[0].s;
    vdata[3].s = vdata[0].s;
    // Slice origin and extent in texture space, duplicated on every vertex.
    vdata[0].s_lo = QVector2D(sprite_props->texCoords[0], sprite_props->texCoords[1]);
    vdata[0].s_hi = QVector2D(sprite_props->texCoords[2] - sprite_props->texCoords[0],
                              sprite_props->texCoords[3] - sprite_props->texCoords[1]);
    vdata[1].s_lo = vdata[0].s_lo;
    vdata[1].s_hi = vdata[0].s_hi;
    vdata[2].s_lo = vdata[0].s_lo;
    vdata[2].s_hi = vdata[0].s_hi;
    vdata[3].s_lo = vdata[0].s_lo;
    vdata[3].s_hi = vdata[0].s_hi;
    // Quad centre (midpoint of the diagonal), used as the rotation origin.
    vdata[0].o = (vdata[1].p + vdata[3].p) * 0.5;
    vdata[1].o = vdata[0].o;
    vdata[2].o = vdata[0].o;
    vdata[3].o = vdata[0].o;
    vdata[0].a = angle;
    vdata[1].a = angle;
    vdata[2].a = angle;
    vdata[3].a = angle;
    // NOTE(review): indices are GLushort, so more than 16384 sprites per atlas
    // (65536 / 4) would overflow — confirm an upstream limit exists.
    idata[0] = (props->size - 1) * 4;
    idata[1] = idata[0] + 1;
    idata[2] = idata[0] + 2;
    idata[3] = idata[0] + 3;
}
I followed this tutorial in the mapnik github wiki to make a world map: https://github.com/mapnik/mapnik/wiki/GettingStartedInPython
I modified this example, and have now embedded the code into a Pyside Qt Widget. My question now is, how does one plot points on this map using x and y coordinates, or latitude and longitude points?
Here is the code I'm using to generate the map and to embed it in the widget:
import mapnik

# Build a 1200x600 world map from the Natural Earth countries shapefile and
# display it inside this widget as a QLabel pixmap.
m = mapnik.Map(1200, 600)
m.background = mapnik.Color('steelblue')

# One style with a single rule: light polygon fill plus thin grey outlines.
country_style = mapnik.Style()
country_rule = mapnik.Rule()
country_rule.symbols.append(mapnik.PolygonSymbolizer(mapnik.Color('#f2eff9')))
country_rule.symbols.append(mapnik.LineSymbolizer(mapnik.Color('rgb(50%,50%,50%)'), 0.1))
country_style.rules.append(country_rule)
m.append_style('My Style', country_style)

# Layer backed by the countries shapefile, rendered with the style above.
world_layer = mapnik.Layer('world')
world_layer.datasource = mapnik.Shapefile(file='/home/lee/shapefiles/ne_110m_admin_0_countries.shp')
world_layer.styles.append('My Style')
m.layers.append(world_layer)
m.zoom_all()

# Render to an in-memory image, then hand it to Qt as PNG bytes.
rendered = mapnik.Image(1200, 600)
mapnik.render(m, rendered)

map_image = QImage()
map_image.loadFromData(QByteArray(rendered.tostring('png')))
map_label = QLabel(self)
map_label.setPixmap(QPixmap.fromImage(map_image))
self.layout.addWidget(map_label)
Usually, you would connect your map to a datasource such as a PostGIS or SQLite database and let mapnik populate the points from said database, similar to something like this. Either in a python script or generated from xml.
However, in answer to your question, you could plot Lat/Lon points by creating a new Feature from a WKT string and adding that feature to a mapnik.MemoryDatasource().
Below is a simple snippet from a script using the mapfile found here
First we create our style and add it to our map:
# Build the style that draws an airport icon at each point feature,
# then register it on the map under the name 'airport point'.
s = mapnik.Style()
r = mapnik.Rule()
icon = mapnik.PointSymbolizer()
icon.filename = './symbols/airport.p.16.png'
r.symbols.append(icon)
s.rules.append(r)
m.append_style('airport point', s)
Now we create our data source and add a Point geometry in WKT format:
# In-memory datasource holding a single point feature defined in WKT.
ds = mapnik.MemoryDatasource()
airport_feature = mapnik.Feature(mapnik.Context(), 1)
airport_feature.add_geometries_from_wkt("POINT(-92.289595 34.746481)")
ds.add_feature(airport_feature)
Now we must create a new layer, add our style that we created, and add the layer to our map:
# Dedicated layer for the airport point. The map itself is Mercator, but the
# WKT coordinates are lat/lon, so the layer projection must be lat/lon.
point_layer = mapnik.Layer('airport_layer')
point_layer.srs = longlat.params()
point_layer.datasource = ds
point_layer.styles.append('airport point')
m.layers.append(point_layer)
m.zoom_all()
You can look at the entire script here.
If you need to get a geographic coordinate (i.e. lat/lon) from a pixel coordinate, you probably need to add your own converter functions.
The Google Maps JS code is as follow could perhaps help :
https://developers.google.com/maps/documentation/javascript/examples/map-coordinates
var TILE_SIZE = 256;

// Clamp `value` into [opt_min, opt_max]; either bound may be null/undefined,
// in which case that side is left unconstrained.
function bound(value, opt_min, opt_max) {
  var result = value;
  if (opt_min != null) {
    result = Math.max(result, opt_min);
  }
  if (opt_max != null) {
    result = Math.min(result, opt_max);
  }
  return result;
}
// Convert an angle from degrees to radians.
function degreesToRadians(deg) {
  var radiansPerDegree = Math.PI / 180;
  return deg * radiansPerDegree;
}
// Convert an angle from radians to degrees.
function radiansToDegrees(rad) {
  var radiansPerDegree = Math.PI / 180;
  return rad / radiansPerDegree;
}
/**
 * Web-Mercator projection helper for a TILE_SIZE-pixel "world tile" at zoom 0.
 * @constructor
 */
function MercatorProjection() {
// Pixel coordinate of (lat 0, lng 0): the centre of the world tile.
this.pixelOrigin_ = new google.maps.Point(TILE_SIZE / 2,
TILE_SIZE / 2);
// Longitude maps linearly to x; latitude uses the Mercator log formula,
// which is scaled in radians.
this.pixelsPerLonDegree_ = TILE_SIZE / 360;
this.pixelsPerLonRadian_ = TILE_SIZE / (2 * Math.PI);
}
// Project a LatLng onto pixel coordinates in the zoom-0 world tile.
// Writes into opt_point when supplied, otherwise allocates a new Point.
MercatorProjection.prototype.fromLatLngToPoint = function(latLng,
    opt_point) {
  var result = opt_point || new google.maps.Point(0, 0);
  var origin = this.pixelOrigin_;

  result.x = origin.x + latLng.lng() * this.pixelsPerLonDegree_;

  // Truncating to 0.9999 effectively limits latitude to 89.189. This is
  // about a third of a tile past the edge of the world tile.
  var siny = bound(Math.sin(degreesToRadians(latLng.lat())), -0.9999,
      0.9999);
  result.y = origin.y + 0.5 * Math.log((1 + siny) / (1 - siny)) *
      -this.pixelsPerLonRadian_;
  return result;
};
// Inverse projection: zoom-0 world-tile pixel coordinates back to a LatLng.
MercatorProjection.prototype.fromPointToLatLng = function(point) {
  var origin = this.pixelOrigin_;
  var lng = (point.x - origin.x) / this.pixelsPerLonDegree_;
  var latRadians = (point.y - origin.y) / -this.pixelsPerLonRadian_;
  var lat = radiansToDegrees(2 * Math.atan(Math.exp(latRadians)) -
      Math.PI / 2);
  return new google.maps.LatLng(lat, lng);
};
// Build one parent widget per macroblock — a clickable "coefficients" button
// plus the block widget itself — then place it on the grid layout and style it.
// NOTE(review): every iteration allocates QWidgets and mutates shared state
// (blockWidgetTypes, buttonSignals, macBlockLayout); the unsynchronised
// push_back flagged at the end of this post makes splitting this loop across
// threads unsafe as written.
for(unsigned int mBlock = 0; mBlock < coords.size(); mBlock++)
{
WidgetType widgetType;
// Grid position derived from the block coordinates.
height = macBlockWidth + coords[mBlock].y;
width = macBlockHeight + coords[mBlock].x;
macBlockParent = new QWidget;
cooefsLink = new QPushButton(macBlockParent);
macBlock = new QWidget(macBlockParent);
widgetType.widget = macBlock;
// Type char: byte 0 of the block record, unless byte 2 is 'S' — then 'S'.
widgetType.type = (macBlocks[mBlock][2] != 'S')
? (macBlocks[mBlock][0]) : (macBlocks[mBlock][2]);
blockWidgetTypes.push_back(widgetType);
connect(cooefsLink, SIGNAL(released()),
this, SLOT(showCoefficients()));
// Remember which block this button belongs to for the shared slot.
buttonSignals[cooefsLink] = mBlock;
constructMotionVector(mBlock);
macBlockLayout->addWidget(macBlockParent, height - 16, width - 16);
styleMacroBlocks(mBlock);
}
could I make a function out of this for loop where I could parallel the operation by splitting it into two different for loops both operating on the vector at the same time. One working on the first half items and the second thread building the second half. So for example
Thread 1
// Proposed thread 1: first half of the blocks, mBlock in [0, coords.size()/2).
// NOTE(review): identical body to the original loop. Unsynchronised writes to
// blockWidgetTypes / buttonSignals and QWidget creation from a worker thread
// make this split unsafe as written — confirm before parallelising.
for(unsigned int mBlock = 0; mBlock < coords.size() / 2; mBlock++)
{
WidgetType widgetType;
height = macBlockWidth + coords[mBlock].y;
width = macBlockHeight + coords[mBlock].x;
macBlockParent = new QWidget;
cooefsLink = new QPushButton(macBlockParent);
macBlock = new QWidget(macBlockParent);
widgetType.widget = macBlock;
widgetType.type = (macBlocks[mBlock][2] != 'S')
? (macBlocks[mBlock][0]) : (macBlocks[mBlock][2]);
blockWidgetTypes.push_back(widgetType);
connect(cooefsLink, SIGNAL(released()),
this, SLOT(showCoefficients()));
buttonSignals[cooefsLink] = mBlock;
constructMotionVector(mBlock);
macBlockLayout->addWidget(macBlockParent, height - 16, width - 16);
styleMacroBlocks(mBlock);
}
Thread 2
// Proposed thread 2: second half of the blocks, mBlock in
// [coords.size()/2, coords.size()).
// NOTE(review): same shared-state and GUI-thread concerns as the thread-1
// half; running both concurrently races on the containers they both mutate.
for(unsigned int mBlock = coords.size() / 2; mBlock < coords.size(); mBlock++)
{
WidgetType widgetType;
height = macBlockWidth + coords[mBlock].y;
width = macBlockHeight + coords[mBlock].x;
macBlockParent = new QWidget;
cooefsLink = new QPushButton(macBlockParent);
macBlock = new QWidget(macBlockParent);
widgetType.widget = macBlock;
widgetType.type = (macBlocks[mBlock][2] != 'S')
? (macBlocks[mBlock][0]) : (macBlocks[mBlock][2]);
blockWidgetTypes.push_back(widgetType);
connect(cooefsLink, SIGNAL(released()),
this, SLOT(showCoefficients()));
buttonSignals[cooefsLink] = mBlock;
constructMotionVector(mBlock);
macBlockLayout->addWidget(macBlockParent, height - 16, width - 16);
styleMacroBlocks(mBlock);
}
Because it's a real bottleneck for my system, and I notice it's only using one CPU and maxing that CPU out. Any help would be great, thanks.
Hm... if you have constructions like blockWidgetTypes.push_back(widgetType); in both threads, that seems very dangerous for multithreaded execution.