NP URI: www.nobleprog.com.ec/en/cc/bdatr
Warning: Undefined array key "mysql_identifier_quote_character"
Logged by myErrorHandler at /apps/nobleprog-website/includes/functions/new-modules-general-functions.php:82

Call path (innermost first):
[1] variable_get('mysql_identifier_quote_character', '`') at /apps/hitra7/drupal7/includes/database/mysql/database.inc:397
[2] DatabaseConnection_mysql->setPrefix(array('default' => '')) at /apps/hitra7/drupal7/includes/database/database.inc:329
[3] DatabaseConnection->__construct('mysql:host=localhost;port=3306;charset=utf8;dbname=hitrahr', ...) at /apps/hitra7/drupal7/includes/database/mysql/database.inc:349
[4] DatabaseConnection_mysql->__construct(options for database hitrahr) at /apps/hitra7/drupal7/includes/database/database.inc:1796
[5] Database::openConnection('hitrahr', 'default') at /apps/hitra7/drupal7/includes/database/database.inc:1582
[6] Database::getConnection('default') at /apps/hitra7/drupal7/includes/database/database.inc:2467
[7] db_query("SELECT entity_id FROM field_data_field_url_alias WHERE field_url_alias_value = :alias AND entity_type = 'taxonomy_term' AND language = :language", array(':alias' => 'cc', ':language' => 'en')) at /apps/nobleprog-website/includes/functions/new-modules-general-functions.php:31
[8] np_db_query('hitrahr', 'db_query', same query and parameters) at /apps/nobleprog-website/includes/functions/category-functions.php:149
[9] category_validate_url_alias('cc') at /apps/nobleprog-website/routes.logic.php:75
[10] check_for_module('/en/cc/bdatr', array('', 'cc', 'bdatr', 'en')) at /apps/nobleprog-website/__index.php:86
[11] include_once of __index.php at /apps/nobleprog-website/_index.php:26
[12] include_once of _index.php at /apps/hitra7/index.php:54

(The original trace dumps the DatabaseConnection_mysql object in full at each constructor frame, repeating the connection options — driver mysql, database hitrahr, user root, host localhost, credentials included — and the 264-entry MySQL reserved-word list from "accessible" through "zerofill". Those dumps are elided here and in the traces below.)
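The warning suggests a configuration lookup that indexes an array without checking the key first; under PHP 8 that raises an E_WARNING, which myErrorHandler then logs with a full backtrace. Below is a minimal sketch of a guarded lookup, assuming a Drupal-7-style global $conf array; the function name np_variable_get is hypothetical, but it takes the same name/default pair as the variable_get() shown in the trace.

<?php
// Sketch: a variable_get()-style lookup that tolerates a missing key.
// Assumes a global $conf array as in Drupal 7; the name is illustrative.
function np_variable_get(string $name, $default = NULL)
{
    global $conf;
    // ?? avoids the "Undefined array key" warning that a bare $conf[$name]
    // raises when the key was never set (PHP 8 promoted this to E_WARNING).
    return $conf[$name] ?? $default;
}

// Usage mirroring the trace: fall back to the backtick quote character.
$quote = np_variable_get('mysql_identifier_quote_character', '`');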
NP URI: www.nobleprog.com.ec/en/cc/bdatr
Warning: Undefined array key "mysql_identifier_quote_character"
Logged by myErrorHandler at /apps/nobleprog-website/includes/functions/new-modules-general-functions.php:82

The same failure recurs while a second connection is opened. Frames [1]-[6] match the trace above, except that the database is common_fe (DSN mysql:host=localhost;port=3306;charset=utf8;dbname=common_fe, opened via Database::openConnection('common_fe', 'default')). The outer frames differ:
[7] db_query('SELECT * FROM price_formulas WHERE country_code = :country_code', array(':country_code' => 'ec')) at /apps/nobleprog-website/includes/functions/new-modules-general-functions.php:31
[8] np_db_query('common_fe', 'db_query', same query and parameters) at /apps/nobleprog-website/includes/functions/course-prices.php:111
[9] get_formula('ec') at /apps/nobleprog-website/includes/functions/course-prices.php:93
[10] course_price_v2_formula() at course-prices.php:355
[11] course_price_change_to_fe_p('bdatr', 14, 'uk_premium,ca_high,za_premium,pl_2500', 0, '', 'USD') at course-prices.php:344
[12] course_price_get_default_price('bdatr') at course-prices.php:316
[13] course_price_get_price('bdatr') at course-prices.php:15
[14] course_price_virtual_event_price('bdatr') at /apps/nobleprog-website/modules/course/course.php:23
[15] course_menu_callback('/en/cc/bdatr') at /apps/nobleprog-website/core/routes.php:19
[16]-[18] require_once/include_once chain: __index.php:100 → core/routes.php; _index.php:26 → __index.php; /apps/hitra7/index.php:54 → _index.php
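Both traces end in the same parameterized-query pattern: np_db_query() selects a connection and forwards SQL with named placeholders to db_query(). Here is a self-contained sketch of that pattern using plain PDO; the wrapper name np_pdo_query, the DSN, and the credentials are assumptions for illustration, not the site's actual implementation.

<?php
// Sketch: the parameterized lookup visible in the traces, done with PDO.
// DSN, credentials, and function name are illustrative assumptions.
function np_pdo_query(PDO $db, string $sql, array $params): array
{
    $stmt = $db->prepare($sql);   // SQL keeps named placeholders like :alias
    $stmt->execute($params);      // values are bound, never interpolated
    return $stmt->fetchAll(PDO::FETCH_ASSOC);
}

$db = new PDO('mysql:host=localhost;dbname=hitrahr;charset=utf8', 'user', 'pass');
$rows = np_pdo_query(
    $db,
    "SELECT entity_id FROM field_data_field_url_alias
     WHERE field_url_alias_value = :alias
       AND entity_type = 'taxonomy_term' AND language = :language",
    [':alias' => 'cc', ':language' => 'en']
);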
NP URI: www.nobleprog.com.ec/en/cc/bdatr
Three further warnings are logged from the pricing code, each by myErrorHandler:
- Undefined array key "sdp" at /apps/nobleprog-website/includes/functions/course-prices.php:281
- Undefined array key "nobleprog_default_trainer_journey" at course-prices.php:286
- Undefined array key "nobleprog_price_rounding" at course-prices.php:289

Each warning fires twice, once per venue. In every trace, course_price_table() is called at course-prices.php:45 with the pricing array (fdp 5437, adp 937, reduced_fdp and reduced_adp empty, days 2, default_venue_fdc 350, default_venue_adc 50, people 1, hours 14, course_code bdatr) and a second argument of 10; the first pass carries venue_id ec_15661446 (vfdc 175.00, vadc 60.00), the second venue_id ec_15661447 (vfdc 200.00, vadc 50.00). The outer path is always course_price_virtual_event_price('bdatr') at /apps/nobleprog-website/modules/course/course.php:23, called from course_menu_callback('/en/cc/bdatr') at /apps/nobleprog-website/core/routes.php:19 through the same include chain as above.
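The three pricing warnings point at one shape of bug: course_price_table() reads keys (sdp and two nobleprog_* settings) that the caller's array does not carry. A hedged sketch of defaulting those keys up front follows; the key names come from the log, while the function name, default values, and surrounding logic are placeholders, since the real implementation is not shown.

<?php
// Sketch: defaulting the keys course_price_table() expects before use.
// Key names are taken from the warnings; the defaults are placeholders.
function course_price_table_safe(array $p, int $margin): array
{
    // Array union adds only the keys that are missing, keeping caller values.
    $p += [
        'sdp' => 0,                                // missing at line 281
        'nobleprog_default_trainer_journey' => 0,  // missing at line 286
        'nobleprog_price_rounding' => 1,           // missing at line 289
    ];
    // ... the original pricing logic would follow, now free of undefined keys.
    return $p;
}

$row = course_price_table_safe(['fdp' => 5437, 'adp' => 937, 'days' => 2], 10);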
NP URI: www.nobleprog.com.ec/en/cc/bdatr
Warning: Cannot modify header information - headers already sent by (output started at /apps/nobleprog-website/_index.php:16)
Raised at /apps/nobleprog-website/modules/course/course.php:119

Call path: header('X-CSRF-Token:Tm9ibGVQcm9nMTcxNjAyMzk5NQ==') at course.php:119, called from course_generate_csrf_token() at course.php:82, from course_render(...) at course.php:31, reached through course_menu_callback('/en/cc/bdatr') and the same include chain. Page output had already started at _index.php:16, so the header could not be sent. course_render() received the full course record for bdatr as its first argument.
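header() fails once any byte of the body has been flushed, and output here started at _index.php:16 long before course.php:119 ran. Below is a sketch of two conventional fixes: emit the header only while it is still possible, or buffer all output in the front controller. The function name is hypothetical, and the token format is an inference — the logged value Tm9ibGVQcm9nMTcxNjAyMzk5NQ== base64-decodes to "NobleProg" followed by a Unix timestamp, which suggests base64_encode('NobleProg' . time()).

<?php
// Sketch: guard the CSRF header against output that has already started.
function course_send_csrf_token(): void
{
    $token = base64_encode('NobleProg' . time()); // inferred token format
    if (!headers_sent($file, $line)) {
        header('X-CSRF-Token: ' . $token);
    } else {
        // Headers are immutable once output begins; record where it began.
        error_log("CSRF header skipped; output started at $file:$line");
    }
}

// Alternative: call ob_start() first thing in the front controller
// (e.g. _index.php) so later header() calls cannot fail.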

The course record and related data passed to course_render() follow, reproduced in cleaned form.

Course: Big Data Analytics for Telecom Regulators (course code: bdatr, hr_nid: 279166)

Requirements:

There are no specific requirements needed to attend this course.

Overview:

To meet regulators' compliance requirements, CSPs (communication service providers) can tap into Big Data analytics, which not only helps them meet compliance but, within the scope of the same project, also lets them increase customer satisfaction and thus reduce churn. In fact, since compliance is tied to the quality of service promised in a contract, any initiative toward meeting compliance improves the “competitive edge” of the CSPs. It is therefore important that regulators be able to advise on and guide a set of Big Data analytic practices for CSPs that benefit regulators and CSPs alike.

The course consists of 8 modules (4 on day 1 and 4 on day 2).

Outline:

1. Module 1: Case studies of how telecom regulators have used Big Data analytics to impose compliance.

2. Module 2: Reviewing millions of contracts between CSPs and their users using unstructured Big Data analytics.

3. Module 3: Extracting structured information from unstructured customer contracts and mapping it to the quality of service observed in IPDR data and crowd-sourced app data; metrics for compliance; automatic detection of compliance violations.

4. Module 4: Using an app-based approach to collect compliance and QoS data: the regulatory authority releases a free mobile app and distributes it among users; the app collects data on QoS, spam, and so on, tracks and analyzes it automatically, and reports it back in analytic dashboard form.

5. Module 5: Processing regulatory app data to generate alarms automatically (alarms are emailed or sent by SMS to stakeholders); implementation of the dashboard and alarm service.

6. Module 6: Using IPDR data for QoS and compliance: IPDR Big Data analytics.

7. Module 7: Customer service experience and a Big Data approach to CSP CRM.

8. Module 8: Big Data ETL for integrating the different QoS data sources into a single dashboard of alarm-based analytics.

Language: en. Duration: 14 hours. Status: published. Last changed: 1700037381. Source title: Big Data Analytics for Telecom Regulators (source language: en).

The record is accompanied by its category term — Big Data (tid 766, alias big-data-training, consulting option available_promoted) — and by the related course outlines reproduced below.

Course: Data Vault: Building a Scalable Data Warehouse (course code: datavault, hr_nid: 210132)

Requirements:

Audience

Overview:

Data Vault Modeling is a database modeling technique that provides long-term historical storage of data that originates from multiple sources. A data vault stores a single version of the facts, or "all the data, all the time". Its flexible, scalable, consistent and adaptable design encompasses the best aspects of 3rd normal form (3NF) and star schema.

In this instructor-led, live training, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

Format of the Course

Category overview:

In this instructor-led, live training in <loc>, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

Outline:

Introduction

Overview of Data Vault architecture and design principles

Data Vault applications

Data Vault components

Building a Data Vault

Modeling Hubs, Links and Satellites

Data Vault reference rules

How components interact with each other

Modeling and populating a Data Vault

Converting 3NF OLTP to a Data Vault Enterprise Data Warehouse (EDW)

Understanding load dates, end-dates, and join operations

Business keys, relationships, link tables and join techniques

Query techniques

Load processing and query processing

Overview of Matrix Methodology

Getting data into data entities

Loading Hub Entities

Loading Link Entities

Loading Satellites

Using SEI/CMM Level 5 templates to obtain repeatable, reliable, and quantifiable results

Developing a consistent and repeatable ETL (Extract, Transform, Load) process

Building and deploying highly scalable and repeatable warehouses

Closing remarks

Language: en. Duration: 28 hours. Status: published. Last changed: 1715349914. Source title: Data Vault: Building a Scalable Data Warehouse (source language: en).

Course: Spark Streaming with Python and Kafka (course code: sparkstreaming, hr_nid: 356863)

Requirements:

Audience

Overview:

Apache Spark Streaming is a scalable, open source stream processing system that allows users to process real-time data from supported sources. Spark Streaming enables fault-tolerant processing of data streams.

This instructor-led, live training (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.

Format of the Course

Course Customization Options

Category overview:

This instructor-led, live training in <loc> (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.

[outline] =>

Introduction

Overview of Spark Streaming Features and Architecture

Preparing the Environment

Processing Messages

Performing a Windowed Stream Processing

Prototyping the Processing Code

Streaming the Code

Acquiring Stream Output

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037710 [source_title] => Spark Streaming with Python and Kafka [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkstreaming ) [ksql] => stdClass Object ( [course_code] => ksql [hr_nid] => 318463 [title] => Confluent KSQL [requirements] =>

Audience

[overview] =>

Confluent KSQL is a stream processing framework built on top of Apache Kafka. It enables real-time data processing using SQL operations.

This instructor-led, live training (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Setting up Confluent KSQL

Overview of KSQL Features and Architecture

How KSQL Interacts with Apache Kafka

Use Cases for KSQL

KSQL Command Line and Operations

Ingesting Data (CSV, JSON, etc.)

Creating a Stream

Creating a Table

Advanced KSQL Operations (Joins, Windowing, Aggregations, Geospatial, etc.)

Deploying KSQL to Production

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037528 [source_title] => Confluent KSQL [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => ksql ) [apacheignite] => stdClass Object ( [course_code] => apacheignite [hr_nid] => 209621 [title] => Apache Ignite for Developers [requirements] =>

Audience

[overview] =>

Apache Ignite is an in-memory computing platform that sits between the application and data layer to improve speed, scale, and availability.

This instructor-led, live training (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring Apache Ignite

Overview of Ignite Architecture

Querying Data in Ignite

Spreading Large Data Sets across a Cluster

Understanding the In-Memory Data Grid

Writing a Service in Ignite

Running Distributed Computing with Ignite

Integrating Ignite with RDBMS, NoSQL, Hadoop and Machine Learning Processors

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 14 [status] => published [changed] => 1700037322 [source_title] => Apache Ignite for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apacheignite ) [beam] => stdClass Object ( [course_code] => beam [hr_nid] => 283646 [title] => Unified Batch and Stream Processing with Apache Beam [requirements] =>

Audience

[overview] =>

Apache Beam is an open source, unified programming model for defining and executing parallel data processing pipelines. It's power lies in its ability to run both batch and streaming pipelines, with execution being carried out by one of Beam's supported distributed processing back-ends: Apache Apex, Apache Flink, Apache Spark, and Google Cloud Dataflow. Apache Beam is useful for ETL (Extract, Transform, and Load) tasks such as moving data between different storage media and data sources, transforming data into a more desirable format, and loading data onto a new system.

In this instructor-led, live training (onsite or remote), participants will learn how to implement the Apache Beam SDKs in a Java or Python application that defines a data processing pipeline for decomposing a big data set into smaller chunks for independent, parallel processing.

By the end of this training, participants will be able to:

Format of the Course

Note

[category_overview] => [outline] =>

Introduction

Installing and Configuring Apache Beam

Overview of Apache Beam Features and Architecture

Understanding the Apache Beam Programming Model

Running a sample pipeline

Designing a Pipeline

Creating the Pipeline

Executing the Pipeline

Testing and Debugging Apache Beam

Processing Bounded and Unbounded Datasets

Making Your Pipelines Reusable and Maintainable

Create New Data Sources and Sinks

Integrating Apache Beam with other Big Data Systems

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 14 [status] => published [changed] => 1700037430 [source_title] => Unified Batch and Stream Processing with Apache Beam [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => beam ) [apex] => stdClass Object ( [course_code] => apex [hr_nid] => 209525 [title] => Apache Apex: Processing Big Data-in-Motion [requirements] =>

Audience

[overview] =>

Apache Apex is a YARN-native platform that unifies stream and batch processing. It processes big data-in-motion in a way that is scalable, performant, fault-tolerant, stateful, secure, distributed, and easily operable.

This instructor-led, live training introduces Apache Apex's unified stream processing architecture, and walks participants through the creation of a distributed application using Apex on Hadoop.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] => [outline] =>

To request a customized course outline for this training, please contact us.

 

[language] => en [duration] => 21 [status] => published [changed] => 1700037320 [source_title] => Apache Apex: Processing Big Data-in-Motion [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apex ) [storm] => stdClass Object ( [course_code] => storm [hr_nid] => 208253 [title] => Apache Storm [requirements] => [overview] =>

Apache Storm is a distributed, real-time computation engine used for enabling real-time business intelligence. It does so by enabling applications to reliably process unbounded streams of data (a.k.a. stream processing).

"Storm is for real-time processing what Hadoop is for batch processing!"

In this instructor-led live training, participants will learn how to install and configure Apache Storm, then develop and deploy an Apache Storm application for processing big data in real-time.

Some of the topics included in this training include:

Request this course now!

Audience

Format of the course

[category_overview] => [outline] =>

Request a customized course outline for this training!

[language] => en [duration] => 28 [status] => published [changed] => 1700037303 [source_title] => Apache Storm [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => storm ) [nifi] => stdClass Object ( [course_code] => nifi [hr_nid] => 212800 [title] => Apache NiFi for Administrators [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc> (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.

By the end of this training, participants will be able to:

[outline] =>

Introduction to Apache NiFi   

Overview of Big Data and Apache Hadoop

Setting up and Running a NiFi Cluster

NiFi Operations

Monitoring and Recovery

Optimizing NiFI

Best practices

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037360 [source_title] => Apache NiFi for Administrators [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifi ) [nifidev] => stdClass Object ( [course_code] => nifidev [hr_nid] => 212804 [title] => Apache NiFi for Developers [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring NiFi

Overview of NiFi Architecture

Development Approaches

Design Considerations

Components, Events, and Processor Patterns

Exercise: Streaming Data Feeds into HDFS

Error Handling

Controller Services

Exercise: Ingesting Data from IoT Devices using Web-Based APIs

Exercise: Developing a Custom Apache Nifi Processor using JSON

Testing and Troubleshooting

Contributing to Apache NiFi

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037361 [source_title] => Apache NiFi for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifidev ) [flink] => stdClass Object ( [course_code] => flink [hr_nid] => 209489 [title] => Apache Flink Fundamentals [requirements] =>

Audience

[overview] =>

Apache Flink is an open-source framework for scalable stream and batch data processing.

This instructor-led, live training (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Installing and Configuring Apache Flink

Overview of Flink Architecture

Developing Data Streaming Applications in Flink

Managing Diverse Workloads

Performing Advanced Analytics

Setting up a Multi-Node Flink Cluster

Mastering Flink DataStream API

Understanding Flink Libraries

Integrating Flink with Other Big Data Tools

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 28 [status] => published [changed] => 1700037319 [source_title] => Apache Flink Fundamentals [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => flink ) [sparkpython] => stdClass Object ( [course_code] => sparkpython [hr_nid] => 279430 [title] => Python and Spark for Big Data (PySpark) [requirements] =>

Audience

[overview] =>

Python is a high-level programming language famous for its clear syntax and code readibility. Spark is a data processing engine used in querying, analyzing, and transforming big data. PySpark allows users to interface Spark with Python.

In this instructor-led, live training, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Big Data

Overview of Spark

Overview of Python

Overview of PySpark

Setting Up Python with Spark

Setting Up PySpark

Using Amazon Web Services (AWS) EC2 Instances for Spark

Setting Up Databricks

Setting Up the AWS EMR Cluster

Learning the Basics of Python Programming

Learning the Basics of Spark DataFrame

Working on a Spark DataFrame Project Exercise

Understanding Machine Learning with MLlib

Working with MLlib, Spark, and Python for Machine Learning

Understanding Regressions

Understanding Random Forests and Decision Trees

Working with K-means Clustering

Working with Recommender Systems

Implementing Natural Language Processing

Streaming with Spark on Python

Closing Remarks

[language] => en [duration] => 21 [status] => published [changed] => 1715349940 [source_title] => Python and Spark for Big Data (PySpark) [source_language] => en [cert_code] => [weight] => -998 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkpython ) [graphcomputing] => stdClass Object ( [course_code] => graphcomputing [hr_nid] => 278402 [title] => Introduction to Graph Computing [requirements] =>

Audience

[overview] =>

Many real world problems can be described in terms of graphs. For example, the Web graph, the social network graph, the train network graph and the language graph. These graphs tend to be extremely large; processing them requires a specialized set of tools and processes -- these tools and processes can be referred to as Graph Computing (also known as Graph Analytics).

In this instructor-led, live training, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics and Distributed Graph Processing) approach. We start with a broad overview and narrow in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics) approach. We start with a broad overview and narrow in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Graph Data

Using Graph Databases to Model, Persist and Process Graph Data

Exercise: Modeling Graph Data with neo4j

Beyond Graph Databases: Graph Computing

Solving Real-World Problems with Traversals

Case Study: Ranking Discussion Contributors

Graph Computing: Local, In-Memory Graph toolkits

Exercise: Modeling Graph Data with NetworkX

Graph Computing: Batch Processing Graph Frameworks

Graph Computing: Graph-Parallel Computation

Setup and Installation

GraphX Operators

Iterating with Pregel API

Building a Graph

Designing Scalable Algorithms

Accessing Additional Algorithms

Exercis: Page Rank and Top Users

Deploying to Production

Closing Remarks

[language] => en [duration] => 28 [status] => published [changed] => 1715349940 [source_title] => Introduction to Graph Computing [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => graphcomputing ) [aitech] => stdClass Object ( [course_code] => aitech [hr_nid] => 199320 [title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [requirements] => [overview] =>

This course is aimed at developers and data scientists who wish to understand and implement AI within their applications. Special focus is given to Data Analysis, Distributed AI and NLP.

[category_overview] => [outline] =>
  1. Distribution big data
    1. Data mining methods (training single systems + distributed prediction: traditional machine learning algorithms + Mapreduce distributed prediction)
    2. Apache Spark MLlib
  2. Recommendations and Advertising:
    1. Natural language
    2. Text clustering, text categorization (labeling), synonyms
    3. User profile restore, labeling system
    4. Recommended algorithms
    5. Insuring the accuracy of "lift" between and within categories
    6. How to create closed loops for recommendation algorithms
  3. Logical regression, RankingSVM,
  4. Feature recognition (deep learning and automatic feature recognition for graphics)
  5. Natural language
    1. Chinese word segmentation
    2. Theme model (text clustering)
    3. Text classification
    4. Extract keywords
    5. Semantic analysis, semantic parser, word2vec (vector to word)
    6. RNN long-term memory (TSTM) architecture
[language] => en [duration] => 21 [status] => published [changed] => 1715084120 [source_title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [source_language] => zh-hans [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => aitech ) [spmllib] => stdClass Object ( [course_code] => spmllib [hr_nid] => 141237 [title] => Apache Spark MLlib [requirements] =>

Knowledge of one of the following:

[overview] =>

MLlib is Spark’s machine learning (ML) library. Its goal is to make practical machine learning scalable and easy. It consists of common learning algorithms and utilities, including classification, regression, clustering, collaborative filtering, dimensionality reduction, as well as lower-level optimization primitives and higher-level pipeline APIs.

It divides into two packages:

 

Audience

This course is directed at engineers and developers seeking to utilize a built in Machine Library for Apache Spark

[category_overview] => [outline] =>

spark.mllib: data types, algorithms, and utilities

spark.ml: high-level APIs for ML pipelines

[language] => en [duration] => 35 [status] => published [changed] => 1700037209 [source_title] => Apache Spark MLlib [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => spmllib ) [kdd] => stdClass Object ( [course_code] => kdd [hr_nid] => 200632 [title] => Knowledge Discovery in Databases (KDD) [requirements] => [overview] =>

Knowledge discovery in databases (KDD) is the process of discovering useful knowledge from a collection of data. Real-life applications for this data mining technique include marketing, fraud detection, telecommunication and manufacturing.

In this instructor-led, live course, we introduce the processes involved in KDD and carry out a series of exercises to practice the implementation of those processes.

Audience

Format of the Course

[category_overview] => [outline] =>

Introduction

Establishing the application domain

Establishing relevant prior knowledge

Understanding the goal of the investigation

Creating a target data set

Data cleaning and preprocessing

Data reduction and projection

Choosing the data mining task

Choosing the data mining algorithms

Interpreting the mined patterns

Summary and conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037259 [source_title] => Knowledge Discovery in Databases (KDD) [source_language] => en [cert_code] => [weight] => -987 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => kdd ) ) [codes] => Array ( [0] => datavault [1] => sparkstreaming [2] => ksql [3] => apacheignite [4] => beam [5] => apex [6] => storm [7] => nifi [8] => nifidev [9] => flink [10] => sparkpython [11] => graphcomputing [12] => aitech [13] => spmllib [14] => kdd ) ) [4] => Array ( [regions] => Array ( [ec_4966] => Array ( [tid] => ec_4966 [title] => Guayaquil [sales_area] => ec_ecuador [venues] => Array ( [ec_15661446] => Array ( [vid] => ec_15661446 [title] => Guayaquil - Mall del Sol [vfdc] => 175.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5787 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5787 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6844 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3422 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7902 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 937 [classroom guaranteed per delegate] => 2634 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8960 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2240 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 10015 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 2003 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11070 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1845 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12131 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1733 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13184 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1648 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14247 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1583 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15300 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1530 ) ) ) ) ) [ec_4967] => Array ( [tid] => ec_4967 [title] => Quito [sales_area] => ec_ecuador [venues] => Array ( [ec_15661447] => Array ( [vid] => ec_15661447 [title] => Quito - Av Eloy Alfaro [vfdc] => 200.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5837 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5837 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6874 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3437 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7911 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 
937 [classroom guaranteed per delegate] => 2637 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8948 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2237 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 9985 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 1997 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11022 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1837 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12061 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1723 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13096 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1637 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14130 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1570 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15170 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1517 ) ) ) ) ) ) [remote] => Array ( [1] => Array ( [remote guaranteed] => 5437 [remote guaranteed per delegate] => 5437 [adp] => 937 ) [2] => Array ( [remote guaranteed] => 6374 [remote guaranteed per delegate] => 3187 [adp] => 937 ) [3] => Array ( [remote guaranteed] => 7311 [remote guaranteed per delegate] => 2437 [adp] => 937 ) [4] => Array ( [remote guaranteed] => 8248 [remote guaranteed per delegate] => 2062 [adp] => 937 ) [5] => Array ( [remote guaranteed] => 9185 [remote guaranteed per delegate] => 1837 [adp] => 937 ) [6] => Array ( [remote guaranteed] => 10122 [remote guaranteed per delegate] => 1687 [adp] => 937 ) [7] => Array ( [remote guaranteed] => 11060 [remote guaranteed per delegate] => 1580 [adp] => 937 ) [8] => Array ( [remote guaranteed] => 12000 [remote guaranteed per delegate] => 1500 [adp] => 937 ) [9] => Array ( [remote guaranteed] => 12933 [remote guaranteed per delegate] => 1437 [adp] => 937 ) [10] => Array ( [remote guaranteed] => 13870 [remote guaranteed per delegate] => 1387 [adp] => 937 ) ) [currency] => USD ) [5] => Array ( [0] => 5 [1] => 5 [2] => 4 [3] => 4 [4] => 5 ) [6] => Array ( [479923] => Array ( [title] => Apache NiFi for Developers [rating] => 5 [delegate_and_company] => Pedro [body] => I liked the virtual machine environments because he could easily toggle between the views and help if we were struggling with the material. [mc] => [is_mt] => 0 [nid] => 479923 ) [445523] => Array ( [title] => Python and Spark for Big Data (PySpark) [rating] => 5 [delegate_and_company] => Aurelia-Adriana - Allianz Services Romania [body] => I liked that it was practical. Loved to apply the theoretical knowledge with practical examples. [mc] => [is_mt] => 0 [nid] => 445523 ) [422075] => Array ( [title] => Apache NiFi for Administrators [rating] => 4 [delegate_and_company] => Rolando García - OIT para México y Cuba [body] => Muy poco, se me dificulto mucho y mas por que entre desfasado, no tome los primeras sesiones. 
Big Data Analytics for Telecom Regulators Training Course

To meet regulatory compliance requirements, CSPs (communication service providers) can tap into Big Data analytics, which not only helps them meet compliance but, within the scope of the same project, can also increase customer satisfaction and thus reduce churn. In fact, since compliance relates to the quality of service tied to a contract, any initiative toward meeting compliance will improve the "competitive edge" of the CSPs. It is therefore important that regulators be able to advise on and guide a set of Big Data analytic practices for CSPs that will be of mutual benefit to both regulators and CSPs.

The course consists of 8 modules (4 on day 1, and 4 on day 2).

Course Outline

1. Module 1: Case studies of how telecom regulators have used Big Data analytics to enforce compliance:

  • TRAI (Telecom Regulatory Authority of India)
  • Turkey's telecom regulator: Telekomünikasyon Kurumu
  • FCC (Federal Communications Commission)
  • BTRC (Bangladesh Telecommunication Regulatory Authority)

2. Module 2: Reviewing millions of contracts between CSPs and their users using unstructured Big Data analytics (a toy extraction sketch follows the list below):

  • Elements of NLP (Natural Language Processing)
  • Extracting SLAs (service level agreements) from millions of contracts
  • Some of the known open source and licensed tools for contract analysis (eBrevia, IBM Watson, KIRA)
  • Automatic discovery of contracts and conflicts through unstructured data analysis
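
As a minimal illustration of the extraction step above, here is a Python sketch that pulls SLA terms out of plain contract text with regular expressions. The clause wording, field names, and patterns are invented for the example; dedicated tools such as eBrevia, IBM Watson, or KIRA use trained NLP models rather than hand-written patterns.

    import re

    # Hypothetical sample clauses; real contract wording varies widely.
    contract = """
    The Provider guarantees a minimum download speed of 8 Mbps.
    Monthly service availability shall be at least 99.5%.
    Refunds are processed within 30 days of a validated complaint.
    """

    # Naive patterns for three common SLA terms.
    patterns = {
        "min_download_mbps": r"minimum download speed of\s+(\d+(?:\.\d+)?)\s*Mbps",
        "availability_pct": r"availability shall be at least\s+(\d+(?:\.\d+)?)\s*%",
        "refund_days": r"[Rr]efunds.*?within\s+(\d+)\s*days",
    }

    sla = {}
    for name, pattern in patterns.items():
        match = re.search(pattern, contract)
        if match:
            sla[name] = float(match.group(1))

    print(sla)  # {'min_download_mbps': 8.0, 'availability_pct': 99.5, 'refund_days': 30.0}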

3. Module 3: Extracting structured information from unstructured customer contracts and mapping it to the quality of service observed in IPDR data and crowd-sourced app data; defining compliance metrics; automatic detection of compliance violations. A minimal violation check is sketched below.
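
A minimal sketch of such an automatic check, assuming SLA targets have already been extracted (as in Module 2) and QoS has been measured per subscriber from IPDR and app data; all field names are illustrative, not a real IPDR schema:

    # SLA targets extracted from the contract (Module 2).
    sla_targets = {"min_download_mbps": 8.0, "availability_pct": 99.5}

    # QoS measured from IPDR records and crowd-sourced app reports.
    measured_qos = [
        {"subscriber": "A-1001", "min_download_mbps": 9.2, "availability_pct": 99.7},
        {"subscriber": "A-1002", "min_download_mbps": 4.1, "availability_pct": 98.9},
    ]

    def violations(record, targets):
        """Return the SLA metrics this record fails to meet."""
        return [m for m, target in targets.items() if record[m] < target]

    for record in measured_qos:
        failed = violations(record, sla_targets)
        if failed:
            print(record["subscriber"], "violates:", ", ".join(failed))
    # -> A-1002 violates: min_download_mbps, availability_pct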

4. Module 4: Using an app-based approach to collect compliance and QoS data: releasing a free regulatory mobile app that tracks and analyzes automatically. In this approach the regulatory authority releases a free app and distributes it among users; the app collects data on QoS, spam, and so on, and reports it back in analytic dashboard form (an illustrative report payload follows the list below):

  • Intelligent spam detection engine (for SMS only) to assist the subscriber in reporting
  • Crowdsourcing of data about offending messages and calls to speed up detection of unregistered telemarketers
  • Updates within the app about action taken on complaints
  • Automatic reporting of voice call quality (call drops, one-way connections) for subscribers who have the regulatory app installed
  • Automatic reporting of data speed
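
To make the data flow concrete, here is a hypothetical example of the kind of record such an app might upload after a dropped call, together with a deliberately crude keyword-based SMS spam score. Every field name and keyword is invented, and a production spam engine would use a trained classifier rather than a keyword list.

    import json
    import time

    # Hypothetical QoS report uploaded by the regulatory app.
    report = {
        "device_id": "anon-3f9c",       # anonymized identifier
        "timestamp": int(time.time()),
        "event": "call_drop",
        "operator": "ExampleTel",
        "signal_dbm": -103,
        "download_mbps": 2.4,
    }
    print(json.dumps(report))

    SPAM_KEYWORDS = {"winner", "free", "claim", "prize", "loan"}

    def sms_spam_score(text):
        """Fraction of words that are known spam keywords."""
        words = [w.strip(".,!?") for w in text.lower().split()]
        if not words:
            return 0.0
        return sum(w in SPAM_KEYWORDS for w in words) / len(words)

    print(sms_spam_score("WINNER! Claim your FREE prize now"))  # ~0.67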

5. Module 5: Processing regulatory app data for automatic alarm generation (alarms are generated and emailed/SMSed to stakeholders automatically); implementation of the dashboard and alarm service (a threshold-rule sketch follows the list below):

  • Microsoft Azure-based dashboard and SNS alarm service
  • AWS Lambda-based dashboard and alarming
  • AWS/Microsoft analytics suites to crunch the data for alarm generation
  • Alarm generation rules
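
A threshold-rule sketch of the alarm path, using AWS SNS to fan alarms out to email/SMS subscribers; the metric names, thresholds, and topic ARN are placeholders, and the same logic could target an Azure notification service instead:

    import boto3  # assumes AWS credentials and an existing SNS topic

    # (metric, threshold, direction, message) - illustrative rules only.
    RULES = [
        ("call_drop_rate_pct", 2.0, "above", "Call-drop rate above 2%"),
        ("avg_download_mbps", 5.0, "below", "Average download speed below 5 Mbps"),
    ]

    TOPIC_ARN = "arn:aws:sns:us-east-1:123456789012:qos-alarms"  # placeholder

    def check_and_alarm(metrics):
        sns = boto3.client("sns")
        for metric, threshold, direction, message in RULES:
            value = metrics[metric]
            breached = value > threshold if direction == "above" else value < threshold
            if breached:
                # SNS delivers the alarm to all email/SMS subscribers.
                sns.publish(TopicArn=TOPIC_ARN, Subject="QoS alarm",
                            Message=f"{message} (measured: {value})")

    check_and_alarm({"call_drop_rate_pct": 3.4, "avg_download_mbps": 7.8})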

6. Module 6: Using IPDR data for QoS and compliance: IPDR Big Data analytics (a usage-aggregation sketch follows the list below):

  • Metered billing by service and subscriber usage
  • Network capacity analysis and planning
  • Edge resource management
  • Network inventory and asset management
  • Service-level objective (SLO) monitoring for business services
  • Quality of experience (QoE) monitoring
  • Call drops
  • Service optimization and product development analytics
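
As a small illustration of the metered-billing analysis listed first above, the sketch below aggregates simplified IPDR-like usage records per subscriber and service and applies an invented per-GB tariff; the real IPDR format is far richer than these three fields:

    from collections import defaultdict

    # Simplified IPDR-like usage records (illustrative fields only).
    records = [
        {"subscriber": "S1", "service": "video", "bytes": 1_200_000_000},
        {"subscriber": "S1", "service": "web",   "bytes":   300_000_000},
        {"subscriber": "S2", "service": "video", "bytes":   800_000_000},
    ]

    TARIFF_PER_GB = {"video": 0.50, "web": 0.30}  # invented tariff

    usage = defaultdict(float)
    for r in records:
        usage[(r["subscriber"], r["service"])] += r["bytes"] / 1e9  # bytes -> GB

    for (sub, svc), gb in sorted(usage.items()):
        print(f"{sub}/{svc}: {gb:.2f} GB -> {gb * TARIFF_PER_GB[svc]:.2f} USD")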

7. Module 7: Customer service experience and a Big Data approach to CSP CRM (a toy SLA-discount calculation follows the list below):

  • Compliance with refund policies
  • Subscription fees
  • Meeting SLAs and subscription discounts
  • Automatic detection of SLA breaches
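
A toy version of the automatic SLA-breach handling above, assuming a contract that promises a pro-rated refund when monthly availability falls below the agreed target; the refund formula and all numbers are invented:

    def sla_discount(monthly_fee, availability_pct, target_pct=99.5):
        """Refund 2% of the fee per 0.1 point of availability shortfall."""
        shortfall = max(0.0, target_pct - availability_pct)
        refund = monthly_fee * 0.02 * (shortfall / 0.1)
        return round(min(monthly_fee, refund), 2)

    print(sla_discount(30.0, 98.9))  # 0.6 point shortfall -> 3.6 USD refund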

8. Module 8: Big Data ETL for integrating different QoS data sources and combining them into a single dashboard for alarm-based analytics (a toy merge step follows the list below):

  • Using a PaaS cloud such as AWS Lambda or Microsoft Azure
  • Using a hybrid cloud approach
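
A toy merge step for such an ETL pipeline, normalizing two QoS sources (IPDR aggregates and app reports) into one row per operator for a single dashboard; operator names and metrics are invented:

    # Two QoS sources, both already keyed by operator (illustrative data).
    ipdr_qos = {"ExampleTel": {"call_drop_rate_pct": 1.4},
                "DemoMobile": {"call_drop_rate_pct": 3.1}}

    app_qos = {"ExampleTel": {"avg_download_mbps": 11.2},
               "DemoMobile": {"avg_download_mbps": 4.6}}

    dashboard = []
    for operator in sorted(set(ipdr_qos) | set(app_qos)):
        row = {"operator": operator}
        row.update(ipdr_qos.get(operator, {}))
        row.update(app_qos.get(operator, {}))
        # The alarm rules from Module 5 can now run on this unified row.
        dashboard.append(row)

    print(dashboard)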

Requirements

There are no specific requirements needed to attend this course.

14 Hours

Number of participants

Price per participant (remote training, USD; classroom prices vary by venue: Guayaquil - Mall del Sol, Quito - Av Eloy Alfaro)

  • 1 participant: 5437
  • 2 participants: 3187 each
  • 3 participants: 2437 each
  • 4 participants: 2062 each
  • 5 participants: 1837 each
  • 6 participants: 1687 each
  • 7 participants: 1580 each
  • 8 participants: 1500 each
  • 9 participants: 1437 each
  • 10 participants: 1387 each

Testimonials (5)

Average rating: 4.6

  • Apache NiFi for Developers, rated 5/5 by Pedro: "I liked the virtual machine environments because he could easily toggle between the views and help if we were struggling with the material."
  • Python and Spark for Big Data (PySpark), rated 5/5 by Aurelia-Adriana (Allianz Services Romania): "I liked that it was practical. Loved to apply the theoretical knowledge with practical examples."
  • Apache NiFi for Administrators, rated 4/5 by Rolando García (OIT para México y Cuba): "Very little; it was quite difficult for me, mainly because I joined late and missed the first sessions." (translated from Spanish)
  • Data Vault: Building a Scalable Data Warehouse, rated 4/5 by John Ernesto II Fernandez (Philippine AXA Life Insurance Corporation): "How the trainer shows his knowledge of the subject he's teaching."
  • Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP, rated 5/5 by Laura Kahn: "This is one of the best hands-on programming courses with exercises I have ever taken."

Related Courses

  • Data Vault: Building a Scalable Data Warehouse (28 Hours)
  • Spark Streaming with Python and Kafka (7 Hours)
  • Confluent KSQL (7 Hours)
  • Apache Ignite for Developers (14 Hours)
  • Unified Batch and Stream Processing with Apache Beam (14 Hours)
  • Apache Apex: Processing Big Data-in-Motion (21 Hours)
  • Apache Storm (28 Hours)
  • Apache NiFi for Administrators (21 Hours)
  • Apache NiFi for Developers (7 Hours)
  • Apache Flink Fundamentals (28 Hours)
  • Python and Spark for Big Data (PySpark) (21 Hours)
  • Introduction to Graph Computing (28 Hours)
  • Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP (21 Hours)
  • Apache Spark MLlib (35 Hours)
  • Knowledge Discovery in Databases (KDD) (21 Hours)

Related Categories

  • Big Data

NP URI: www.nobleprog.com.ec/en/cc/bdatr Undefined array key "nobleprog_site_production_url" /apps/nobleprog-website/includes/functions/new-modules-general-functions.php:82 Array ( [0] => Array ( [file] => /apps/nobleprog-website/includes/functions/new-modules-general-functions.php [line] => 82 [function] => myErrorHandler [args] => Array ( [0] => 2 [1] => Undefined array key "nobleprog_site_production_url" [2] => /apps/nobleprog-website/includes/functions/new-modules-general-functions.php [3] => 82 ) ) [1] => Array ( [file] => /apps/hitra7/drupal7/sites/all/modules/_custom/frontend/islc7/islc7.module [line] => 131 [function] => variable_get [args] => Array ( [0] => nobleprog_site_production_url ) ) [2] => Array ( [file] => /apps/hitra7/drupal7/sites/all/modules/_custom/frontend/islc7/islc7.module [line] => 94 [function] => islc_get_current_site [args] => Array ( ) ) [3] => Array ( [file] => /apps/hitra7/drupal7/sites/all/modules/_custom/frontend/islc7/islc7_block.inc [line] => 34 [function] => islc_get_site_list [args] => Array ( ) ) [4] => Array ( [file] => /apps/nobleprog-website/nptemplates/default.php [line] => 265 [function] => islc7_sites_links_array_v3 [args] => Array ( ) ) [5] => Array ( [file] => /apps/nobleprog-website/modules/course/course.php [line] => 85 [args] => Array ( [0] => /apps/nobleprog-website/nptemplates/default.php ) [function] => require_once ) [6] => Array ( [file] => /apps/nobleprog-website/modules/course/course.php [line] => 31 [function] => course_render [args] => Array ( [0] => Array ( [course_code] => bdatr [hr_nid] => 279166 [title] => Big Data Analytics for Telecom Regulators [requirements] =>

There are no specific requirements needed to attend this course.

[overview] =>

To meet compliance of the regulators, CSPs (Communication service providers) can tap into Big Data Analytics which not only help them to meet compliance but within the scope of same project they can increase customer satisfaction and thus reduce the churn. In fact since compliance is related to Quality of service tied to a contract, any initiative towards meeting the compliance, will improve the “competitive edge” of the CSPs. Therefore, it is important that Regulators should be able to advise/guide a set of Big Data analytic practice for CSPs that will be of mutual benefit between the regulators and CSPs.

The course consists of 8 modules (4 on day 1, and 4 on day 2)

[category_overview] => [outline] =>

1. Module-1 : Case studies of how Telecom Regulators have used Big Data Analytics for imposing compliance :

2. Module-2 : Reviewing Millions of contract between CSPs and its users using unstructured Big data analytics

3. Module -3 : Extracting Structured information from unstructured Customer Contract and map them to Quality of Service obtained from IPDR data &amp; Crowd Sourced app data. Metric for Compliance. Automatic detection of compliance violations.

4. Module- 4 : USING app approach to collect compliance and QoS data- release a free regulatory mobile app to the users to track & Analyze automatically. In this approach regulatory authority will be releasing free app and distribute among the users-and the app will be collecting data on QoS/Spams etc and report it back in analytic dashboard form :

5. Module-5 : Processing of regulatory app data for automatic alarm system generation (alarms will be generated and emailed/sms to stake holders automatically) :
Implementation of dashboard and alarm service

6. Module-6 : Use IPDR data for QoS and Compliance-IPDR Big data analytics:

7. Module-7 : Customer Service Experience &amp; Big Data approach to CSP CRM :

8. Module-8 : Big Data ETL for integrating different QoS data source and combine to a single dashboard alarm based analytics:

[language] => en [duration] => 14 [status] => published [changed] => 1700037381 [source_title] => Big Data Analytics for Telecom Regulators [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) ) [1] => Array ( [0] => stdClass Object ( [tid] => 766 [alias] => big-data-training [name] => Big Data [english_name] => Big Data [consulting_option] => available_promoted ) ) [2] => bdatr [3] => Array ( [outlines] => Array ( [datavault] => stdClass Object ( [course_code] => datavault [hr_nid] => 210132 [title] => Data Vault: Building a Scalable Data Warehouse [requirements] =>

Audience

[overview] =>

Data Vault Modeling is a database modeling technique that provides long-term historical storage of data that originates from multiple sources. A data vault stores a single version of the facts, or "all the data, all the time". Its flexible, scalable, consistent and adaptable design encompasses the best aspects of 3rd normal form (3NF) and star schema.

In this instructor-led, live training, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Data Vault architecture and design principles

Data Vault applications

Data Vault components

Building a Data Vault

Modeling Hubs, Links and Satellites

Data Vault reference rules

How components interact with each other

Modeling and populating a Data Vault

Converting 3NF OLTP to a Data Vault Enterprise Data Warehouse (EDW)

Understanding load dates, end-dates, and join operations

Business keys, relationships, link tables and join techniques

Query techniques

Load processing and query processing

Overview of Matrix Methodology

Getting data into data entities

Loading Hub Entities

Loading Link Entities

Loading Satellites

Using SEI/CMM Level 5 templates to obtain repeatable, reliable, and quantifiable results

Developing a consistent and repeatable ETL (Extract, Transform, Load) process

Building and deploying highly scalable and repeatable warehouses

Closing remarks

[language] => en [duration] => 28 [status] => published [changed] => 1715349914 [source_title] => Data Vault: Building a Scalable Data Warehouse [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => datavault ) [sparkstreaming] => stdClass Object ( [course_code] => sparkstreaming [hr_nid] => 356863 [title] => Spark Streaming with Python and Kafka [requirements] =>

Audience

[overview] =>

Apache Spark Streaming is a scalable, open source stream processing system that allows users to process real-time data from supported sources. Spark Streaming enables fault-tolerant processing of data streams.

This instructor-led, live training (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.

[outline] =>

Introduction

Overview of Spark Streaming Features and Architecture

Preparing the Environment

Processing Messages

Performing a Windowed Stream Processing

Prototyping the Processing Code

Streaming the Code

Acquiring Stream Output

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037710 [source_title] => Spark Streaming with Python and Kafka [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkstreaming ) [ksql] => stdClass Object ( [course_code] => ksql [hr_nid] => 318463 [title] => Confluent KSQL [requirements] =>

Audience

[overview] =>

Confluent KSQL is a stream processing framework built on top of Apache Kafka. It enables real-time data processing using SQL operations.

This instructor-led, live training (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Setting up Confluent KSQL

Overview of KSQL Features and Architecture

How KSQL Interacts with Apache Kafka

Use Cases for KSQL

KSQL Command Line and Operations

Ingesting Data (CSV, JSON, etc.)

Creating a Stream

Creating a Table

Advanced KSQL Operations (Joins, Windowing, Aggregations, Geospatial, etc.)

Deploying KSQL to Production

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037528 [source_title] => Confluent KSQL [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => ksql ) [apacheignite] => stdClass Object ( [course_code] => apacheignite [hr_nid] => 209621 [title] => Apache Ignite for Developers [requirements] =>

Audience

[overview] =>

Apache Ignite is an in-memory computing platform that sits between the application and data layer to improve speed, scale, and availability.

This instructor-led, live training (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring Apache Ignite

Overview of Ignite Architecture

Querying Data in Ignite

Spreading Large Data Sets across a Cluster

Understanding the In-Memory Data Grid

Writing a Service in Ignite

Running Distributed Computing with Ignite

Integrating Ignite with RDBMS, NoSQL, Hadoop and Machine Learning Processors

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 14 [status] => published [changed] => 1700037322 [source_title] => Apache Ignite for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apacheignite ) [beam] => stdClass Object ( [course_code] => beam [hr_nid] => 283646 [title] => Unified Batch and Stream Processing with Apache Beam [requirements] =>

Audience

[overview] =>

Apache Beam is an open source, unified programming model for defining and executing parallel data processing pipelines. It's power lies in its ability to run both batch and streaming pipelines, with execution being carried out by one of Beam's supported distributed processing back-ends: Apache Apex, Apache Flink, Apache Spark, and Google Cloud Dataflow. Apache Beam is useful for ETL (Extract, Transform, and Load) tasks such as moving data between different storage media and data sources, transforming data into a more desirable format, and loading data onto a new system.

In this instructor-led, live training (onsite or remote), participants will learn how to implement the Apache Beam SDKs in a Java or Python application that defines a data processing pipeline for decomposing a big data set into smaller chunks for independent, parallel processing.

By the end of this training, participants will be able to:

Format of the Course

Note

[category_overview] => [outline] =>

Introduction

Installing and Configuring Apache Beam

Overview of Apache Beam Features and Architecture

Understanding the Apache Beam Programming Model

Running a sample pipeline

Designing a Pipeline

Creating the Pipeline

Executing the Pipeline

Testing and Debugging Apache Beam

Processing Bounded and Unbounded Datasets

Making Your Pipelines Reusable and Maintainable

Create New Data Sources and Sinks

Integrating Apache Beam with other Big Data Systems

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 14 [status] => published [changed] => 1700037430 [source_title] => Unified Batch and Stream Processing with Apache Beam [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => beam ) [apex] => stdClass Object ( [course_code] => apex [hr_nid] => 209525 [title] => Apache Apex: Processing Big Data-in-Motion [requirements] =>

Audience

[overview] =>

Apache Apex is a YARN-native platform that unifies stream and batch processing. It processes big data-in-motion in a way that is scalable, performant, fault-tolerant, stateful, secure, distributed, and easily operable.

This instructor-led, live training introduces Apache Apex's unified stream processing architecture, and walks participants through the creation of a distributed application using Apex on Hadoop.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] => [outline] =>

To request a customized course outline for this training, please contact us.

 

[language] => en [duration] => 21 [status] => published [changed] => 1700037320 [source_title] => Apache Apex: Processing Big Data-in-Motion [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apex ) [storm] => stdClass Object ( [course_code] => storm [hr_nid] => 208253 [title] => Apache Storm [requirements] => [overview] =>

Apache Storm is a distributed, real-time computation engine used for enabling real-time business intelligence. It does so by enabling applications to reliably process unbounded streams of data (a.k.a. stream processing).

"Storm is for real-time processing what Hadoop is for batch processing!"

In this instructor-led live training, participants will learn how to install and configure Apache Storm, then develop and deploy an Apache Storm application for processing big data in real-time.

Some of the topics included in this training include:

Request this course now!

Audience

Format of the course

[category_overview] => [outline] =>

Request a customized course outline for this training!

[language] => en [duration] => 28 [status] => published [changed] => 1700037303 [source_title] => Apache Storm [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => storm ) [nifi] => stdClass Object ( [course_code] => nifi [hr_nid] => 212800 [title] => Apache NiFi for Administrators [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc> (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.

By the end of this training, participants will be able to:

[outline] =>

Introduction to Apache NiFi   

Overview of Big Data and Apache Hadoop

Setting up and Running a NiFi Cluster

NiFi Operations

Monitoring and Recovery

Optimizing NiFI

Best practices

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037360 [source_title] => Apache NiFi for Administrators [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifi ) [nifidev] => stdClass Object ( [course_code] => nifidev [hr_nid] => 212804 [title] => Apache NiFi for Developers [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring NiFi

Overview of NiFi Architecture

Development Approaches

Design Considerations

Components, Events, and Processor Patterns

Exercise: Streaming Data Feeds into HDFS

Error Handling

Controller Services

Exercise: Ingesting Data from IoT Devices using Web-Based APIs

Exercise: Developing a Custom Apache Nifi Processor using JSON

Testing and Troubleshooting

Contributing to Apache NiFi

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037361 [source_title] => Apache NiFi for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifidev ) [flink] => stdClass Object ( [course_code] => flink [hr_nid] => 209489 [title] => Apache Flink Fundamentals [requirements] =>

Audience

[overview] =>

Apache Flink is an open-source framework for scalable stream and batch data processing.

This instructor-led, live training (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Installing and Configuring Apache Flink

Overview of Flink Architecture

Developing Data Streaming Applications in Flink

Managing Diverse Workloads

Performing Advanced Analytics

Setting up a Multi-Node Flink Cluster

Mastering Flink DataStream API

Understanding Flink Libraries

Integrating Flink with Other Big Data Tools

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 28 [status] => published [changed] => 1700037319 [source_title] => Apache Flink Fundamentals [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => flink ) [sparkpython] => stdClass Object ( [course_code] => sparkpython [hr_nid] => 279430 [title] => Python and Spark for Big Data (PySpark) [requirements] =>

Audience

[overview] =>

Python is a high-level programming language famous for its clear syntax and code readibility. Spark is a data processing engine used in querying, analyzing, and transforming big data. PySpark allows users to interface Spark with Python.

In this instructor-led, live training, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Big Data

Overview of Spark

Overview of Python

Overview of PySpark

Setting Up Python with Spark

Setting Up PySpark

Using Amazon Web Services (AWS) EC2 Instances for Spark

Setting Up Databricks

Setting Up the AWS EMR Cluster

Learning the Basics of Python Programming

Learning the Basics of Spark DataFrame

Working on a Spark DataFrame Project Exercise

Understanding Machine Learning with MLlib

Working with MLlib, Spark, and Python for Machine Learning

Understanding Regressions

Understanding Random Forests and Decision Trees

Working with K-means Clustering

Working with Recommender Systems

Implementing Natural Language Processing

Streaming with Spark on Python

Closing Remarks

[language] => en [duration] => 21 [status] => published [changed] => 1715349940 [source_title] => Python and Spark for Big Data (PySpark) [source_language] => en [cert_code] => [weight] => -998 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkpython ) [graphcomputing] => stdClass Object ( [course_code] => graphcomputing [hr_nid] => 278402 [title] => Introduction to Graph Computing [requirements] =>

Audience

[overview] =>

Many real-world problems can be described in terms of graphs: for example, the Web graph, the social network graph, the train network graph, and the language graph. These graphs tend to be extremely large; processing them requires a specialized set of tools and processes, collectively referred to as Graph Computing (also known as Graph Analytics).

In this instructor-led, live training, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics and Distributed Graph Processing) approach. We start with a broad overview and narrow in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.
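
As a small taste of the in-memory end of that spectrum, a sketch using NetworkX, one of the toolkits covered; the discussion data is invented for illustration:

    import networkx as nx

    # Model "who replies to whom" in a discussion as a directed graph.
    G = nx.DiGraph()
    G.add_edges_from([("ann", "bob"), ("carl", "bob"),
                      ("bob", "dana"), ("carl", "dana")])

    # PageRank scores as a simple "top contributors" ranking.
    for user, score in sorted(nx.pagerank(G).items(), key=lambda kv: -kv[1]):
        print(user, round(score, 3))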

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics) approach. We start with a broad overview and narrow in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Graph Data

Using Graph Databases to Model, Persist and Process Graph Data

Exercise: Modeling Graph Data with neo4j

Beyond Graph Databases: Graph Computing

Solving Real-World Problems with Traversals

Case Study: Ranking Discussion Contributors

Graph Computing: Local, In-Memory Graph toolkits

Exercise: Modeling Graph Data with NetworkX

Graph Computing: Batch Processing Graph Frameworks

Graph Computing: Graph-Parallel Computation

Setup and Installation

GraphX Operators

Iterating with Pregel API

Building a Graph

Designing Scalable Algorithms

Accessing Additional Algorithms

Exercise: PageRank and Top Users

Deploying to Production

Closing Remarks

[language] => en [duration] => 28 [status] => published [changed] => 1715349940 [source_title] => Introduction to Graph Computing [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => graphcomputing ) [aitech] => stdClass Object ( [course_code] => aitech [hr_nid] => 199320 [title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [requirements] => [overview] =>

This course is aimed at developers and data scientists who wish to understand and implement AI within their applications. Special focus is given to Data Analysis, Distributed AI and NLP.

[category_overview] => [outline] =>
  1. Distributed big data
    1. Data mining methods (training single systems + distributed prediction: traditional machine learning algorithms + MapReduce distributed prediction)
    2. Apache Spark MLlib
  2. Recommendations and Advertising:
    1. Natural language
    2. Text clustering, text categorization (labeling), synonyms
    3. User profile reconstruction, labeling systems
    4. Recommendation algorithms
    5. Ensuring the accuracy of "lift" between and within categories
    6. How to create closed loops for recommendation algorithms
  3. Logistic regression, RankingSVM
  4. Feature recognition (deep learning and automatic feature recognition for graphics)
  5. Natural language
    1. Chinese word segmentation
    2. Topic models (text clustering)
    3. Text classification (a minimal sketch follows this outline)
    4. Keyword extraction
    5. Semantic analysis, semantic parsers, word2vec (word to vector)
    6. RNN long short-term memory (LSTM) architecture
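
A minimal text-classification sketch in the spirit of the logistic-regression and text-classification items above, using scikit-learn (the corpus and labels are invented):

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline

    # Toy corpus; real labeling systems train on far larger datasets.
    texts = ["cheap flights and hotel deals",
             "new GPU accelerates deep learning",
             "discount hotel booking",
             "training neural networks on GPUs"]
    labels = ["ads", "tech", "ads", "tech"]

    model = make_pipeline(TfidfVectorizer(), LogisticRegression())
    model.fit(texts, labels)
    print(model.predict(["hotel discounts this weekend"]))  # expected: ['ads']
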
[language] => en [duration] => 21 [status] => published [changed] => 1715084120 [source_title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [source_language] => zh-hans [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => aitech ) [spmllib] => stdClass Object ( [course_code] => spmllib [hr_nid] => 141237 [title] => Apache Spark MLlib [requirements] =>

Knowledge of one of the following:

[overview] =>

MLlib is Spark’s machine learning (ML) library. Its goal is to make practical machine learning scalable and easy. It consists of common learning algorithms and utilities, including classification, regression, clustering, collaborative filtering, dimensionality reduction, as well as lower-level optimization primitives and higher-level pipeline APIs.

It is divided into two packages:

 

Audience

This course is directed at engineers and developers seeking to utilize a built-in machine learning library for Apache Spark.

[category_overview] => [outline] =>

spark.mllib: data types, algorithms, and utilities

spark.ml: high-level APIs for ML pipelines
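
A minimal sketch of the spark.ml side, chaining feature extraction and an estimator into one Pipeline (assuming a local PySpark installation; the training rows are invented):

    from pyspark.ml import Pipeline
    from pyspark.ml.classification import LogisticRegression
    from pyspark.ml.feature import HashingTF, Tokenizer
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("spark-ml-demo").getOrCreate()
    train = spark.createDataFrame([("spark is great", 1.0),
                                   ("boring spreadsheet", 0.0)],
                                  ["text", "label"])

    # Tokenize -> hash to feature vectors -> fit a classifier, as one Pipeline.
    stages = [Tokenizer(inputCol="text", outputCol="words"),
              HashingTF(inputCol="words", outputCol="features"),
              LogisticRegression(maxIter=10)]
    model = Pipeline(stages=stages).fit(train)
    model.transform(train).select("text", "prediction").show()
    spark.stop()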

[language] => en [duration] => 35 [status] => published [changed] => 1700037209 [source_title] => Apache Spark MLlib [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => spmllib ) [kdd] => stdClass Object ( [course_code] => kdd [hr_nid] => 200632 [title] => Knowledge Discovery in Databases (KDD) [requirements] => [overview] =>

Knowledge discovery in databases (KDD) is the process of discovering useful knowledge from a collection of data. Real-life applications for this data mining technique include marketing, fraud detection, telecommunications, and manufacturing.

In this instructor-led, live course, we introduce the processes involved in KDD and carry out a series of exercises to practice the implementation of those processes.
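
As a toy illustration of those processes (selection, cleaning, mining), a sketch with pandas and scikit-learn; the customer data is invented:

    import pandas as pd
    from sklearn.cluster import KMeans

    # Selection and cleaning: keep relevant columns, drop incomplete records.
    raw = pd.DataFrame({"spend": [120, 80, None, 300, 310],
                        "visits": [12, 9, 4, 30, 28]})
    target = raw.dropna().copy()

    # Mining: cluster customers; interpreting the segments is the human step.
    target["segment"] = KMeans(n_clusters=2, n_init=10).fit_predict(target)
    print(target)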

Audience

Format of the Course

[category_overview] => [outline] =>

Introduction

Establishing the application domain

Establishing relevant prior knowledge

Understanding the goal of the investigation

Creating a target data set

Data cleaning and preprocessing

Data reduction and projection

Choosing the data mining task

Choosing the data mining algorithms

Interpreting the mined patterns

Summary and conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037259 [source_title] => Knowledge Discovery in Databases (KDD) [source_language] => en [cert_code] => [weight] => -987 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => kdd ) ) [codes] => Array ( [0] => datavault [1] => sparkstreaming [2] => ksql [3] => apacheignite [4] => beam [5] => apex [6] => storm [7] => nifi [8] => nifidev [9] => flink [10] => sparkpython [11] => graphcomputing [12] => aitech [13] => spmllib [14] => kdd ) ) [4] => Array ( [regions] => Array ( [ec_4966] => Array ( [tid] => ec_4966 [title] => Guayaquil [sales_area] => ec_ecuador [venues] => Array ( [ec_15661446] => Array ( [vid] => ec_15661446 [title] => Guayaquil - Mall del Sol [vfdc] => 175.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5787 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5787 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6844 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3422 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7902 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 937 [classroom guaranteed per delegate] => 2634 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8960 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2240 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 10015 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 2003 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11070 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1845 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12131 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1733 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13184 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1648 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14247 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1583 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15300 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1530 ) ) ) ) ) [ec_4967] => Array ( [tid] => ec_4967 [title] => Quito [sales_area] => ec_ecuador [venues] => Array ( [ec_15661447] => Array ( [vid] => ec_15661447 [title] => Quito - Av Eloy Alfaro [vfdc] => 200.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5837 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5837 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6874 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3437 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7911 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 
937 [classroom guaranteed per delegate] => 2637 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8948 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2237 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 9985 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 1997 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11022 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1837 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12061 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1723 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13096 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1637 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14130 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1570 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15170 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1517 ) ) ) ) ) ) [remote] => Array ( [1] => Array ( [remote guaranteed] => 5437 [remote guaranteed per delegate] => 5437 [adp] => 937 ) [2] => Array ( [remote guaranteed] => 6374 [remote guaranteed per delegate] => 3187 [adp] => 937 ) [3] => Array ( [remote guaranteed] => 7311 [remote guaranteed per delegate] => 2437 [adp] => 937 ) [4] => Array ( [remote guaranteed] => 8248 [remote guaranteed per delegate] => 2062 [adp] => 937 ) [5] => Array ( [remote guaranteed] => 9185 [remote guaranteed per delegate] => 1837 [adp] => 937 ) [6] => Array ( [remote guaranteed] => 10122 [remote guaranteed per delegate] => 1687 [adp] => 937 ) [7] => Array ( [remote guaranteed] => 11060 [remote guaranteed per delegate] => 1580 [adp] => 937 ) [8] => Array ( [remote guaranteed] => 12000 [remote guaranteed per delegate] => 1500 [adp] => 937 ) [9] => Array ( [remote guaranteed] => 12933 [remote guaranteed per delegate] => 1437 [adp] => 937 ) [10] => Array ( [remote guaranteed] => 13870 [remote guaranteed per delegate] => 1387 [adp] => 937 ) ) [currency] => USD ) [5] => Array ( [0] => 5 [1] => 5 [2] => 4 [3] => 4 [4] => 5 ) [6] => Array ( [479923] => Array ( [title] => Apache NiFi for Developers [rating] => 5 [delegate_and_company] => Pedro [body] => I liked the virtual machine environments because he could easily toggle between the views and help if we were struggling with the material. [mc] => [is_mt] => 0 [nid] => 479923 ) [445523] => Array ( [title] => Python and Spark for Big Data (PySpark) [rating] => 5 [delegate_and_company] => Aurelia-Adriana - Allianz Services Romania [body] => I liked that it was practical. Loved to apply the theoretical knowledge with practical examples. [mc] => [is_mt] => 0 [nid] => 445523 ) [422075] => Array ( [title] => Apache NiFi for Administrators [rating] => 4 [delegate_and_company] => Rolando García - OIT para México y Cuba [body] => Very little; it was very difficult for me, mostly because I joined late and did not take the first sessions.
[mc] => [is_mt] => 0 [nid] => 422075 ) [404743] => Array ( [title] => Data Vault: Building a Scalable Data Warehouse [rating] => 4 [delegate_and_company] => john ernesto ii fernandez - Philippine AXA Life Insurance Corporation [body] => how the trainer shows his knowledge of the subject he's teaching [mc] => [is_mt] => 0 [nid] => 404743 ) [283902] => Array ( [title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [rating] => 5 [delegate_and_company] => Laura Kahn [body] => This is one of the best hands-on with exercises programming courses I have ever taken. [mc] => This is one of the best hands-on with exercises programming courses I have ever taken. [is_mt] => 0 [nid] => 283902 ) ) [7] => 4.6 [8] => 1 [9] => 1 [10] => ) ) [7] => Array ( [file] => /apps/nobleprog-website/core/routes.php [line] => 19 [function] => course_menu_callback [args] => Array ( [0] => /en/cc/bdatr ) ) [8] => Array ( [file] => /apps/nobleprog-website/__index.php [line] => 100 [args] => Array ( [0] => /apps/nobleprog-website/core/routes.php ) [function] => require_once ) [9] => Array ( [file] => /apps/nobleprog-website/_index.php [line] => 26 [args] => Array ( [0] => /apps/nobleprog-website/__index.php ) [function] => include_once ) [10] => Array ( [file] => /apps/hitra7/index.php [line] => 54 [args] => Array ( [0] => /apps/nobleprog-website/_index.php ) [function] => include_once ) ) NP URI: www.nobleprog.com.ec/en/cc/bdatr Undefined array key "devel_domain" /apps/nobleprog-website/includes/functions/new-modules-general-functions.php:82 Array ( [0] => Array ( [file] => /apps/nobleprog-website/includes/functions/new-modules-general-functions.php [line] => 82 [function] => myErrorHandler [args] => Array ( [0] => 2 [1] => Undefined array key "devel_domain" [2] => /apps/nobleprog-website/includes/functions/new-modules-general-functions.php [3] => 82 ) ) [1] => Array ( [file] => /apps/hitra7/drupal7/sites/all/modules/_custom/frontend/islc7/islc7.module [line] => 99 [function] => variable_get [args] => Array ( [0] => devel_domain [1] => ) ) [2] => Array ( [file] => /apps/hitra7/drupal7/sites/all/modules/_custom/frontend/islc7/islc7_block.inc [line] => 34 [function] => islc_get_site_list [args] => Array ( ) ) [3] => Array ( [file] => /apps/nobleprog-website/nptemplates/default.php [line] => 265 [function] => islc7_sites_links_array_v3 [args] => Array ( ) ) [4] => Array ( [file] => /apps/nobleprog-website/modules/course/course.php [line] => 85 [args] => Array ( [0] => /apps/nobleprog-website/nptemplates/default.php ) [function] => require_once ) [5] => Array ( [file] => /apps/nobleprog-website/modules/course/course.php [line] => 31 [function] => course_render [args] => Array ( [0] => Array ( [course_code] => bdatr [hr_nid] => 279166 [title] => Big Data Analytics for Telecom Regulators [requirements] =>

There are no specific requirements needed to attend this course.

[overview] =>

To meet regulators' compliance requirements, CSPs (Communication Service Providers) can tap into Big Data analytics, which not only helps them meet compliance but, within the scope of the same project, lets them increase customer satisfaction and thus reduce churn. In fact, since compliance is tied to the quality of service defined in a contract, any initiative toward meeting compliance will improve the CSPs' competitive edge. It is therefore important that regulators be able to advise on and guide a set of Big Data analytics practices for CSPs that benefit both the regulators and the CSPs.

The course consists of 8 modules (4 on day 1, and 4 on day 2).

[category_overview] => [outline] =>

1. Module 1: Case studies of how telecom regulators have used Big Data analytics to enforce compliance

2. Module 2: Reviewing millions of contracts between CSPs and their users using unstructured Big Data analytics

3. Module 3: Extracting structured information from unstructured customer contracts and mapping it to the quality of service obtained from IPDR data & crowd-sourced app data; metrics for compliance; automatic detection of compliance violations

4. Module 4: Using an app-based approach to collect compliance and QoS data: releasing a free regulatory mobile app that tracks and analyzes automatically. In this approach, the regulatory authority releases a free app and distributes it among users; the app collects data on QoS, spam, etc. and reports it back in analytics-dashboard form

5. Module 5: Processing regulatory app data to generate automatic alarms (alarms are generated and emailed/SMSed to stakeholders automatically); implementation of the dashboard and alarm service (see the alarm sketch after this outline)

6. Module 6: Using IPDR data for QoS and compliance: IPDR Big Data analytics

7. Module 7: Customer service experience & a Big Data approach to CSP CRM

8. Module 8: Big Data ETL for integrating different QoS data sources and combining them into a single dashboard of alarm-based analytics
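
As a toy illustration of the Module 5 alarm idea, a sketch using Python's standard library (the addresses, threshold, and reading are all hypothetical, and a local mail relay is assumed):

    import smtplib
    from email.message import EmailMessage

    # Hypothetical QoS reading collected by the regulatory app.
    reading = {"operator": "CSP-1", "download_mbps": 2.1, "sla_mbps": 5.0}

    if reading["download_mbps"] < reading["sla_mbps"]:
        msg = EmailMessage()
        msg["Subject"] = "QoS alarm: " + reading["operator"]
        msg["From"] = "alarms@regulator.example"
        msg["To"] = "stakeholders@regulator.example"
        msg.set_content("Measured %(download_mbps)s Mbps against an SLA of "
                        "%(sla_mbps)s Mbps" % reading)
        with smtplib.SMTP("localhost") as s:  # assumes a local mail relay
            s.send_message(msg)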

[language] => en [duration] => 14 [status] => published [changed] => 1700037381 [source_title] => Big Data Analytics for Telecom Regulators [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) ) [1] => Array ( [0] => stdClass Object ( [tid] => 766 [alias] => big-data-training [name] => Big Data [english_name] => Big Data [consulting_option] => available_promoted ) ) [2] => bdatr [3] => Array ( [outlines] => Array ( [datavault] => stdClass Object ( [course_code] => datavault [hr_nid] => 210132 [title] => Data Vault: Building a Scalable Data Warehouse [requirements] =>

Audience

[overview] =>

Data Vault Modeling is a database modeling technique that provides long-term historical storage of data that originates from multiple sources. A data vault stores a single version of the facts, or "all the data, all the time". Its flexible, scalable, consistent and adaptable design encompasses the best aspects of 3rd normal form (3NF) and star schema.
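
To make the Hub/Link/Satellite vocabulary concrete before the outline below, a minimal sketch in Python with SQLite; the table and column names are illustrative, not a course artifact:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
    -- Hub: one row per business key, plus load metadata.
    CREATE TABLE hub_customer (
        customer_hk   TEXT PRIMARY KEY,   -- hash of the business key
        customer_id   TEXT NOT NULL,      -- the business key itself
        load_date     TEXT NOT NULL,
        record_source TEXT NOT NULL
    );
    -- Satellite: descriptive attributes, historized by load_date.
    CREATE TABLE sat_customer_details (
        customer_hk   TEXT NOT NULL REFERENCES hub_customer(customer_hk),
        load_date     TEXT NOT NULL,
        name          TEXT,
        city          TEXT,
        record_source TEXT NOT NULL,
        PRIMARY KEY (customer_hk, load_date)
    );
    -- Link: a relationship between hubs (the order hub is omitted here).
    CREATE TABLE link_customer_order (
        link_hk       TEXT PRIMARY KEY,
        customer_hk   TEXT NOT NULL,
        order_hk      TEXT NOT NULL,
        load_date     TEXT NOT NULL,
        record_source TEXT NOT NULL
    );
    """)
    print([r[0] for r in conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table'")])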

In this instructor-led, live training, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Data Vault architecture and design principles

Data Vault applications

Data Vault components

Building a Data Vault

Modeling Hubs, Links and Satellites

Data Vault reference rules

How components interact with each other

Modeling and populating a Data Vault

Converting 3NF OLTP to a Data Vault Enterprise Data Warehouse (EDW)

Understanding load dates, end-dates, and join operations

Business keys, relationships, link tables and join techniques

Query techniques

Load processing and query processing

Overview of Matrix Methodology

Getting data into data entities

Loading Hub Entities

Loading Link Entities

Loading Satellites

Using SEI/CMM Level 5 templates to obtain repeatable, reliable, and quantifiable results

Developing a consistent and repeatable ETL (Extract, Transform, Load) process

Building and deploying highly scalable and repeatable warehouses

Closing remarks

[language] => en [duration] => 28 [status] => published [changed] => 1715349914 [source_title] => Data Vault: Building a Scalable Data Warehouse [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => datavault ) [sparkstreaming] => stdClass Object ( [course_code] => sparkstreaming [hr_nid] => 356863 [title] => Spark Streaming with Python and Kafka [requirements] =>

Audience

[overview] =>

Apache Spark Streaming is a scalable, open source stream processing system that allows users to process real-time data from supported sources. Spark Streaming enables fault-tolerant processing of data streams.
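
For orientation, a minimal count over Kafka using Spark's newer Structured Streaming API in Python (a sketch, not course material; it assumes the spark-sql-kafka connector on the classpath and a broker at localhost:9092 with an "events" topic):

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("stream-demo").getOrCreate()

    # Read Kafka messages as an unbounded table and count distinct values.
    stream = (spark.readStream.format("kafka")
              .option("kafka.bootstrap.servers", "localhost:9092")
              .option("subscribe", "events")
              .load())
    counts = (stream.selectExpr("CAST(value AS STRING) AS value")
              .groupBy("value").count())

    # Print running counts to the console until interrupted.
    (counts.writeStream.outputMode("complete")
     .format("console").start().awaitTermination())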

This instructor-led, live training (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.

[outline] =>

Introduction

Overview of Spark Streaming Features and Architecture

Preparing the Environment

Processing Messages

Performing a Windowed Stream Processing

Prototyping the Processing Code

Streaming the Code

Acquiring Stream Output

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037710 [source_title] => Spark Streaming with Python and Kafka [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkstreaming ) [ksql] => stdClass Object ( [course_code] => ksql [hr_nid] => 318463 [title] => Confluent KSQL [requirements] =>

Audience

[overview] =>

Confluent KSQL is a stream processing framework built on top of Apache Kafka. It enables real-time data processing using SQL operations.
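
Statements are plain SQL submitted to the server, for example via its REST endpoint. A sketch, assuming a KSQL/ksqlDB server on localhost:8088 and a "pageviews" Kafka topic:

    import requests

    stmt = """
    CREATE STREAM pageviews (viewtime BIGINT, user_id VARCHAR, page_id VARCHAR)
      WITH (KAFKA_TOPIC='pageviews', VALUE_FORMAT='JSON');
    """

    # Submit the DDL statement to the server's /ksql endpoint.
    resp = requests.post("http://localhost:8088/ksql",
                         headers={"Content-Type": "application/vnd.ksql.v1+json"},
                         json={"ksql": stmt, "streamsProperties": {}})
    print(resp.status_code, resp.json())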

This instructor-led, live training (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Setting up Confluent KSQL

Overview of KSQL Features and Architecture

How KSQL Interacts with Apache Kafka

Use Cases for KSQL

KSQL Command Line and Operations

Ingesting Data (CSV, JSON, etc.)

Creating a Stream

Creating a Table

Advanced KSQL Operations (Joins, Windowing, Aggregations, Geospatial, etc.)

Deploying KSQL to Production

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037528 [source_title] => Confluent KSQL [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => ksql ) [apacheignite] => stdClass Object ( [course_code] => apacheignite [hr_nid] => 209621 [title] => Apache Ignite for Developers [requirements] =>

Audience

[overview] =>

Apache Ignite is an in-memory computing platform that sits between the application and data layer to improve speed, scale, and availability.
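
As a first taste, Ignite's key-value API through the Python thin client (a sketch assuming the pyignite package and a node listening on the default thin-client port):

    from pyignite import Client

    client = Client()
    client.connect("127.0.0.1", 10800)  # default thin-client port

    # A cache behaves like a distributed key-value map.
    cache = client.get_or_create_cache("quotes")
    cache.put("AAPL", 182.5)
    print(cache.get("AAPL"))
    client.close()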

This instructor-led, live training (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring Apache Ignite

Overview of Ignite Architecture

Querying Data in Ignite

Spreading Large Data Sets across a Cluster

Understanding the In-Memory Data Grid

Writing a Service in Ignite

Running Distributed Computing with Ignite

Integrating Ignite with RDBMS, NoSQL, Hadoop and Machine Learning Processors

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 14 [status] => published [changed] => 1700037322 [source_title] => Apache Ignite for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apacheignite ) [beam] => stdClass Object ( [course_code] => beam [hr_nid] => 283646 [title] => Unified Batch and Stream Processing with Apache Beam [requirements] =>

Audience

[overview] =>

Apache Beam is an open source, unified programming model for defining and executing parallel data processing pipelines. Its power lies in its ability to run both batch and streaming pipelines, with execution being carried out by one of Beam's supported distributed processing back-ends: Apache Apex, Apache Flink, Apache Spark, and Google Cloud Dataflow. Apache Beam is useful for ETL (Extract, Transform, and Load) tasks such as moving data between different storage media and data sources, transforming data into a more desirable format, and loading data onto a new system.

In this instructor-led, live training (onsite or remote), participants will learn how to implement the Apache Beam SDKs in a Java or Python application that defines a data processing pipeline for decomposing a big data set into smaller chunks for independent, parallel processing.
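
The shape of such a pipeline in the Python SDK, as a minimal runnable sketch (bounded input here; the same structure applies to streaming sources):

    import apache_beam as beam

    # Each step is a named transform; the runner decides how to parallelize.
    with beam.Pipeline() as p:
        (p
         | "Create" >> beam.Create([1, 2, 3, 4])
         | "Square" >> beam.Map(lambda x: x * x)
         | "Print" >> beam.Map(print))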

By the end of this training, participants will be able to:

Format of the Course

Note

[category_overview] => [outline] =>

Introduction

Installing and Configuring Apache Beam

Overview of Apache Beam Features and Architecture

Understanding the Apache Beam Programming Model

Running a Sample Pipeline

Designing a Pipeline

Creating the Pipeline

Executing the Pipeline

Testing and Debugging Apache Beam

Processing Bounded and Unbounded Datasets

Making Your Pipelines Reusable and Maintainable

Creating New Data Sources and Sinks

Integrating Apache Beam with other Big Data Systems

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 14 [status] => published [changed] => 1700037430 [source_title] => Unified Batch and Stream Processing with Apache Beam [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => beam ) [apex] => stdClass Object ( [course_code] => apex [hr_nid] => 209525 [title] => Apache Apex: Processing Big Data-in-Motion [requirements] =>

Audience

[overview] =>

Apache Apex is a YARN-native platform that unifies stream and batch processing. It processes big data-in-motion in a way that is scalable, performant, fault-tolerant, stateful, secure, distributed, and easily operable.

This instructor-led, live training introduces Apache Apex's unified stream processing architecture, and walks participants through the creation of a distributed application using Apex on Hadoop.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] => [outline] =>

To request a customized course outline for this training, please contact us.

 

[language] => en [duration] => 21 [status] => published [changed] => 1700037320 [source_title] => Apache Apex: Processing Big Data-in-Motion [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apex ) [storm] => stdClass Object ( [course_code] => storm [hr_nid] => 208253 [title] => Apache Storm [requirements] => [overview] =>

Apache Storm is a distributed, real-time computation engine used for enabling real-time business intelligence. It does so by enabling applications to reliably process unbounded streams of data (a.k.a. stream processing).

"Storm is for real-time processing what Hadoop is for batch processing!"

In this instructor-led live training, participants will learn how to install and configure Apache Storm, then develop and deploy an Apache Storm application for processing big data in real-time.

Some of the topics covered in this training include:

Request this course now!

Audience

Format of the course

[category_overview] => [outline] =>

Request a customized course outline for this training!

[language] => en [duration] => 28 [status] => published [changed] => 1700037303 [source_title] => Apache Storm [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => storm ) [nifi] => stdClass Object ( [course_code] => nifi [hr_nid] => 212800 [title] => Apache NiFi for Administrators [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.
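
Much of that management surface is exposed over NiFi's REST API. A monitoring sketch (assuming an unsecured instance on localhost:8080; secured clusters need HTTPS and a token, and response field names may vary by version):

    import requests

    base = "http://localhost:8080/nifi-api"

    # System diagnostics: JVM heap, threads, storage usage.
    diag = requests.get(base + "/system-diagnostics").json()
    print("heap used:",
          diag["systemDiagnostics"]["aggregateSnapshot"]["usedHeap"])

    # Overall flow status: active threads, queued FlowFiles.
    status = requests.get(base + "/flow/status").json()
    print("active threads:",
          status["controllerStatus"]["activeThreadCount"])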

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc> (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.

By the end of this training, participants will be able to:

[outline] =>

Introduction to Apache NiFi   

Overview of Big Data and Apache Hadoop

Setting up and Running a NiFi Cluster

NiFi Operations

Monitoring and Recovery

Optimizing NiFi

Best Practices

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037360 [source_title] => Apache NiFi for Administrators [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifi ) [nifidev] => stdClass Object ( [course_code] => nifidev [hr_nid] => 212804 [title] => Apache NiFi for Developers [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring NiFi

Overview of NiFi Architecture

Development Approaches

Design Considerations

Components, Events, and Processor Patterns

Exercise: Streaming Data Feeds into HDFS

Error Handling

Controller Services

Exercise: Ingesting Data from IoT Devices using Web-Based APIs

Exercise: Developing a Custom Apache Nifi Processor using JSON

Testing and Troubleshooting

Contributing to Apache NiFi

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037361 [source_title] => Apache NiFi for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifidev ) [flink] => stdClass Object ( [course_code] => flink [hr_nid] => 209489 [title] => Apache Flink Fundamentals [requirements] =>

Audience

[overview] =>

Apache Flink is an open-source framework for scalable stream and batch data processing.

This instructor-led, live training (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Installing and Configuring Apache Flink

Overview of Flink Architecture

Developing Data Streaming Applications in Flink

Managing Diverse Workloads

Performing Advanced Analytics

Setting up a Multi-Node Flink Cluster

Mastering Flink DataStream API

Understanding Flink Libraries

Integrating Flink with Other Big Data Tools

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 28 [status] => published [changed] => 1700037319 [source_title] => Apache Flink Fundamentals [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => flink ) [sparkpython] => stdClass Object ( [course_code] => sparkpython [hr_nid] => 279430 [title] => Python and Spark for Big Data (PySpark) [requirements] =>

Audience

[overview] =>

Python is a high-level programming language famous for its clear syntax and code readibility. Spark is a data processing engine used in querying, analyzing, and transforming big data. PySpark allows users to interface Spark with Python.

In this instructor-led, live training, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Big Data

Overview of Spark

Overview of Python

Overview of PySpark

Setting Up Python with Spark

Setting Up PySpark

Using Amazon Web Services (AWS) EC2 Instances for Spark

Setting Up Databricks

Setting Up the AWS EMR Cluster

Learning the Basics of Python Programming

Learning the Basics of Spark DataFrame

Working on a Spark DataFrame Project Exercise

Understanding Machine Learning with MLlib

Working with MLlib, Spark, and Python for Machine Learning

Understanding Regressions

Understanding Random Forests and Decision Trees

Working with K-means Clustering

Working with Recommender Systems

Implementing Natural Language Processing

Streaming with Spark on Python

Closing Remarks

[language] => en [duration] => 21 [status] => published [changed] => 1715349940 [source_title] => Python and Spark for Big Data (PySpark) [source_language] => en [cert_code] => [weight] => -998 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkpython ) [graphcomputing] => stdClass Object ( [course_code] => graphcomputing [hr_nid] => 278402 [title] => Introduction to Graph Computing [requirements] =>

Audience

[overview] =>

Many real world problems can be described in terms of graphs. For example, the Web graph, the social network graph, the train network graph and the language graph. These graphs tend to be extremely large; processing them requires a specialized set of tools and processes -- these tools and processes can be referred to as Graph Computing (also known as Graph Analytics).

In this instructor-led, live training, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics and Distributed Graph Processing) approach. We start with a broad overview and narrow in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics) approach. We start with a broad overview and narrow in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Graph Data

Using Graph Databases to Model, Persist and Process Graph Data

Exercise: Modeling Graph Data with neo4j

Beyond Graph Databases: Graph Computing

Solving Real-World Problems with Traversals

Case Study: Ranking Discussion Contributors

Graph Computing: Local, In-Memory Graph toolkits

Exercise: Modeling Graph Data with NetworkX

Graph Computing: Batch Processing Graph Frameworks

Graph Computing: Graph-Parallel Computation

Setup and Installation

GraphX Operators

Iterating with Pregel API

Building a Graph

Designing Scalable Algorithms

Accessing Additional Algorithms

Exercis: Page Rank and Top Users

Deploying to Production

Closing Remarks

[language] => en [duration] => 28 [status] => published [changed] => 1715349940 [source_title] => Introduction to Graph Computing [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => graphcomputing ) [aitech] => stdClass Object ( [course_code] => aitech [hr_nid] => 199320 [title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [requirements] => [overview] =>

This course is aimed at developers and data scientists who wish to understand and implement AI within their applications. Special focus is given to Data Analysis, Distributed AI and NLP.

[category_overview] => [outline] =>
  1. Distribution big data
    1. Data mining methods (training single systems + distributed prediction: traditional machine learning algorithms + Mapreduce distributed prediction)
    2. Apache Spark MLlib
  2. Recommendations and Advertising:
    1. Natural language
    2. Text clustering, text categorization (labeling), synonyms
    3. User profile restore, labeling system
    4. Recommended algorithms
    5. Insuring the accuracy of "lift" between and within categories
    6. How to create closed loops for recommendation algorithms
  3. Logical regression, RankingSVM,
  4. Feature recognition (deep learning and automatic feature recognition for graphics)
  5. Natural language
    1. Chinese word segmentation
    2. Theme model (text clustering)
    3. Text classification
    4. Extract keywords
    5. Semantic analysis, semantic parser, word2vec (vector to word)
    6. RNN long-term memory (TSTM) architecture
[language] => en [duration] => 21 [status] => published [changed] => 1715084120 [source_title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [source_language] => zh-hans [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => aitech ) [spmllib] => stdClass Object ( [course_code] => spmllib [hr_nid] => 141237 [title] => Apache Spark MLlib [requirements] =>

Knowledge of one of the following:

[overview] =>

MLlib is Spark’s machine learning (ML) library. Its goal is to make practical machine learning scalable and easy. It consists of common learning algorithms and utilities, including classification, regression, clustering, collaborative filtering, dimensionality reduction, as well as lower-level optimization primitives and higher-level pipeline APIs.

It divides into two packages:

 

Audience

This course is directed at engineers and developers seeking to utilize a built in Machine Library for Apache Spark

[category_overview] => [outline] =>

spark.mllib: data types, algorithms, and utilities

spark.ml: high-level APIs for ML pipelines

[language] => en [duration] => 35 [status] => published [changed] => 1700037209 [source_title] => Apache Spark MLlib [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => spmllib ) [kdd] => stdClass Object ( [course_code] => kdd [hr_nid] => 200632 [title] => Knowledge Discovery in Databases (KDD) [requirements] => [overview] =>

Knowledge discovery in databases (KDD) is the process of discovering useful knowledge from a collection of data. Real-life applications for this data mining technique include marketing, fraud detection, telecommunication and manufacturing.

In this instructor-led, live course, we introduce the processes involved in KDD and carry out a series of exercises to practice the implementation of those processes.

Audience

Format of the Course

[category_overview] => [outline] =>

Introduction

Establishing the application domain

Establishing relevant prior knowledge

Understanding the goal of the investigation

Creating a target data set

Data cleaning and preprocessing

Data reduction and projection

Choosing the data mining task

Choosing the data mining algorithms

Interpreting the mined patterns

Summary and conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037259 [source_title] => Knowledge Discovery in Databases (KDD) [source_language] => en [cert_code] => [weight] => -987 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => kdd ) ) [codes] => Array ( [0] => datavault [1] => sparkstreaming [2] => ksql [3] => apacheignite [4] => beam [5] => apex [6] => storm [7] => nifi [8] => nifidev [9] => flink [10] => sparkpython [11] => graphcomputing [12] => aitech [13] => spmllib [14] => kdd ) ) [4] => Array ( [regions] => Array ( [ec_4966] => Array ( [tid] => ec_4966 [title] => Guayaquil [sales_area] => ec_ecuador [venues] => Array ( [ec_15661446] => Array ( [vid] => ec_15661446 [title] => Guayaquil - Mall del Sol [vfdc] => 175.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5787 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5787 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6844 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3422 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7902 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 937 [classroom guaranteed per delegate] => 2634 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8960 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2240 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 10015 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 2003 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11070 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1845 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12131 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1733 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13184 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1648 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14247 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1583 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15300 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1530 ) ) ) ) ) [ec_4967] => Array ( [tid] => ec_4967 [title] => Quito [sales_area] => ec_ecuador [venues] => Array ( [ec_15661447] => Array ( [vid] => ec_15661447 [title] => Quito - Av Eloy Alfaro [vfdc] => 200.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5837 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5837 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6874 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3437 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7911 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 
937 [classroom guaranteed per delegate] => 2637 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8948 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2237 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 9985 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 1997 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11022 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1837 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12061 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1723 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13096 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1637 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14130 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1570 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15170 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1517 ) ) ) ) ) ) [remote] => Array ( [1] => Array ( [remote guaranteed] => 5437 [remote guaranteed per delegate] => 5437 [adp] => 937 ) [2] => Array ( [remote guaranteed] => 6374 [remote guaranteed per delegate] => 3187 [adp] => 937 ) [3] => Array ( [remote guaranteed] => 7311 [remote guaranteed per delegate] => 2437 [adp] => 937 ) [4] => Array ( [remote guaranteed] => 8248 [remote guaranteed per delegate] => 2062 [adp] => 937 ) [5] => Array ( [remote guaranteed] => 9185 [remote guaranteed per delegate] => 1837 [adp] => 937 ) [6] => Array ( [remote guaranteed] => 10122 [remote guaranteed per delegate] => 1687 [adp] => 937 ) [7] => Array ( [remote guaranteed] => 11060 [remote guaranteed per delegate] => 1580 [adp] => 937 ) [8] => Array ( [remote guaranteed] => 12000 [remote guaranteed per delegate] => 1500 [adp] => 937 ) [9] => Array ( [remote guaranteed] => 12933 [remote guaranteed per delegate] => 1437 [adp] => 937 ) [10] => Array ( [remote guaranteed] => 13870 [remote guaranteed per delegate] => 1387 [adp] => 937 ) ) [currency] => USD ) [5] => Array ( [0] => 5 [1] => 5 [2] => 4 [3] => 4 [4] => 5 ) [6] => Array ( [479923] => Array ( [title] => Apache NiFi for Developers [rating] => 5 [delegate_and_company] => Pedro [body] => I liked the virtual machine environments because he could easily toggle between the views and help if we were struggling with the material. [mc] => [is_mt] => 0 [nid] => 479923 ) [445523] => Array ( [title] => Python and Spark for Big Data (PySpark) [rating] => 5 [delegate_and_company] => Aurelia-Adriana - Allianz Services Romania [body] => I liked that it was practical. Loved to apply the theoretical knowledge with practical examples. [mc] => [is_mt] => 0 [nid] => 445523 ) [422075] => Array ( [title] => Apache NiFi for Administrators [rating] => 4 [delegate_and_company] => Rolando García - OIT para México y Cuba [body] => Muy poco, se me dificulto mucho y mas por que entre desfasado, no tome los primeras sesiones. 
[mc] => [is_mt] => 0 [nid] => 422075 ) [404743] => Array ( [title] => Data Vault: Building a Scalable Data Warehouse [rating] => 4 [delegate_and_company] => john ernesto ii fernandez - Philippine AXA Life Insurance Corporation [body] => how the trainor shows his knowledge in the subject he's teachign [mc] => [is_mt] => 0 [nid] => 404743 ) [283902] => Array ( [title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [rating] => 5 [delegate_and_company] => Laura Kahn [body] => This is one of the best hands-on with exercises programming courses I have ever taken. [mc] => This is one of the best hands-on with exercises programming courses I have ever taken. [is_mt] => 0 [nid] => 283902 ) ) [7] => 4.6 [8] => 1 [9] => 1 [10] => ) ) [6] => Array ( [file] => /apps/nobleprog-website/core/routes.php [line] => 19 [function] => course_menu_callback [args] => Array ( [0] => /en/cc/bdatr ) ) [7] => Array ( [file] => /apps/nobleprog-website/__index.php [line] => 100 [args] => Array ( [0] => /apps/nobleprog-website/core/routes.php ) [function] => require_once ) [8] => Array ( [file] => /apps/nobleprog-website/_index.php [line] => 26 [args] => Array ( [0] => /apps/nobleprog-website/__index.php ) [function] => include_once ) [9] => Array ( [file] => /apps/hitra7/index.php [line] => 54 [args] => Array ( [0] => /apps/nobleprog-website/_index.php ) [function] => include_once ) ) NP URI: www.nobleprog.com.ec/en/cc/bdatr Undefined array key "nobleprog_site_production_url" /apps/nobleprog-website/includes/functions/new-modules-general-functions.php:82 Array ( [0] => Array ( [file] => /apps/nobleprog-website/includes/functions/new-modules-general-functions.php [line] => 82 [function] => myErrorHandler [args] => Array ( [0] => 2 [1] => Undefined array key "nobleprog_site_production_url" [2] => /apps/nobleprog-website/includes/functions/new-modules-general-functions.php [3] => 82 ) ) [1] => Array ( [file] => /apps/hitra7/drupal7/sites/all/modules/_custom/frontend/islc7/islc7.module [line] => 131 [function] => variable_get [args] => Array ( [0] => nobleprog_site_production_url ) ) [2] => Array ( [file] => /apps/hitra7/drupal7/sites/all/modules/_custom/frontend/islc7/islc7_block.inc [line] => 44 [function] => islc_get_current_site [args] => Array ( ) ) [3] => Array ( [file] => /apps/nobleprog-website/nptemplates/default.php [line] => 265 [function] => islc7_sites_links_array_v3 [args] => Array ( ) ) [4] => Array ( [file] => /apps/nobleprog-website/modules/course/course.php [line] => 85 [args] => Array ( [0] => /apps/nobleprog-website/nptemplates/default.php ) [function] => require_once ) [5] => Array ( [file] => /apps/nobleprog-website/modules/course/course.php [line] => 31 [function] => course_render [args] => Array ( [0] => Array ( [course_code] => bdatr [hr_nid] => 279166 [title] => Big Data Analytics for Telecom Regulators [requirements] =>

There are no specific requirements needed to attend this course.

[overview] =>

To meet compliance of the regulators, CSPs (Communication service providers) can tap into Big Data Analytics which not only help them to meet compliance but within the scope of same project they can increase customer satisfaction and thus reduce the churn. In fact since compliance is related to Quality of service tied to a contract, any initiative towards meeting the compliance, will improve the “competitive edge” of the CSPs. Therefore, it is important that Regulators should be able to advise/guide a set of Big Data analytic practice for CSPs that will be of mutual benefit between the regulators and CSPs.

The course consists of 8 modules (4 on day 1, and 4 on day 2)

[category_overview] => [outline] =>

1. Module 1: Case studies of how telecom regulators have used Big Data analytics to impose compliance

2. Module 2: Reviewing millions of contracts between CSPs and their users using unstructured Big Data analytics

3. Module 3: Extracting structured information from unstructured customer contracts and mapping it to the quality of service obtained from IPDR data and crowd-sourced app data; metrics for compliance; automatic detection of compliance violations (a minimal sketch of such a check follows this list)

4. Module 4: Using an app-based approach to collect compliance and QoS data: the regulatory authority releases a free mobile app and distributes it among users; the app collects data on QoS, spam, etc. and reports it back in analytic dashboard form

5. Module 5: Processing regulatory app data for automatic alarm generation (alarms are generated and emailed/SMSed to stakeholders automatically); implementation of the dashboard and alarm service

6. Module 6: Using IPDR data for QoS and compliance: IPDR Big Data analytics

7. Module 7: Customer service experience and a Big Data approach to CSP CRM

8. Module 8: Big Data ETL for integrating different QoS data sources and combining them into a single dashboard of alarm-based analytics
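
To make the compliance-metric idea concrete, here is a minimal, hypothetical sketch in Python with pandas; the field names (avg_downlink_mbps, contracted_mbps) and the 80% threshold are illustrative assumptions, not part of the course material.

    import pandas as pd

    # Hypothetical IPDR-style QoS records; field names are illustrative only.
    records = pd.DataFrame([
        {"csp": "CSP-A", "subscriber": 1, "avg_downlink_mbps": 8.5, "contracted_mbps": 10},
        {"csp": "CSP-A", "subscriber": 2, "avg_downlink_mbps": 11.2, "contracted_mbps": 10},
        {"csp": "CSP-B", "subscriber": 3, "avg_downlink_mbps": 4.0, "contracted_mbps": 10},
    ])

    # Flag subscribers whose measured throughput falls below 80% of the contracted rate.
    records["violation"] = records["avg_downlink_mbps"] < 0.8 * records["contracted_mbps"]

    # Per-CSP compliance rate: one possible metric a regulator could track on a dashboard.
    compliance = 1 - records.groupby("csp")["violation"].mean()
    print(compliance)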

[language] => en [duration] => 14 [status] => published [changed] => 1700037381 [source_title] => Big Data Analytics for Telecom Regulators [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) ) [1] => Array ( [0] => stdClass Object ( [tid] => 766 [alias] => big-data-training [name] => Big Data [english_name] => Big Data [consulting_option] => available_promoted ) ) [2] => bdatr [3] => Array ( [outlines] => Array ( [datavault] => stdClass Object ( [course_code] => datavault [hr_nid] => 210132 [title] => Data Vault: Building a Scalable Data Warehouse [requirements] =>

Audience

[overview] =>

Data Vault Modeling is a database modeling technique that provides long-term historical storage of data that originates from multiple sources. A data vault stores a single version of the facts, or "all the data, all the time". Its flexible, scalable, consistent and adaptable design encompasses the best aspects of 3rd normal form (3NF) and star schema.
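
As a rough sketch of the Hub/Link/Satellite separation described above, here is a minimal example using SQLite purely for illustration; the table layout is a simplified assumption, not the course's reference model.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()

    # Hub: one row per business key, plus load metadata.
    cur.execute("""CREATE TABLE hub_customer (
        customer_hk   TEXT PRIMARY KEY,   -- hash of the business key
        customer_id   TEXT NOT NULL,      -- the business key itself
        load_date     TEXT NOT NULL,
        record_source TEXT NOT NULL)""")

    # Link: relationships between hubs, again keyed by hash.
    cur.execute("""CREATE TABLE link_customer_order (
        link_hk       TEXT PRIMARY KEY,
        customer_hk   TEXT NOT NULL REFERENCES hub_customer(customer_hk),
        order_hk      TEXT NOT NULL,
        load_date     TEXT NOT NULL,
        record_source TEXT NOT NULL)""")

    # Satellite: descriptive attributes, historized by load date.
    cur.execute("""CREATE TABLE sat_customer_details (
        customer_hk TEXT NOT NULL REFERENCES hub_customer(customer_hk),
        load_date   TEXT NOT NULL,
        name        TEXT,
        city        TEXT,
        PRIMARY KEY (customer_hk, load_date))""")

    cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
    print(cur.fetchall())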

In this instructor-led, live training, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn how to build a Data Vault.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Data Vault architecture and design principles

Data Vault applications

Data Vault components

Building a Data Vault

Modeling Hubs, Links and Satellites

Data Vault reference rules

How components interact with each other

Modeling and populating a Data Vault

Converting 3NF OLTP to a Data Vault Enterprise Data Warehouse (EDW)

Understanding load dates, end-dates, and join operations

Business keys, relationships, link tables and join techniques

Query techniques

Load processing and query processing

Overview of Matrix Methodology

Getting data into data entities

Loading Hub Entities

Loading Link Entities

Loading Satellites

Using SEI/CMM Level 5 templates to obtain repeatable, reliable, and quantifiable results

Developing a consistent and repeatable ETL (Extract, Transform, Load) process

Building and deploying highly scalable and repeatable warehouses

Closing remarks

[language] => en [duration] => 28 [status] => published [changed] => 1715349914 [source_title] => Data Vault: Building a Scalable Data Warehouse [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => datavault ) [sparkstreaming] => stdClass Object ( [course_code] => sparkstreaming [hr_nid] => 356863 [title] => Spark Streaming with Python and Kafka [requirements] =>

Audience

[overview] =>

Apache Spark Streaming is a scalable, open-source stream processing system that allows users to process real-time data from supported sources. Spark Streaming enables fault-tolerant processing of data streams.

This instructor-led, live training (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.
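
A minimal sketch of the idea in PySpark's Structured Streaming API, assuming a local Kafka broker at localhost:9092, a topic named events, and the spark-sql-kafka connector package on the classpath (all assumptions; the course's own exercises may differ):

    from pyspark.sql import SparkSession
    from pyspark.sql.functions import col, window

    spark = SparkSession.builder.appName("qos-stream").getOrCreate()

    # Read a live stream from a Kafka topic; broker address and topic are placeholders.
    events = (spark.readStream
        .format("kafka")
        .option("kafka.bootstrap.servers", "localhost:9092")
        .option("subscribe", "events")
        .load())

    # Kafka values arrive as bytes; cast to string and count messages per 1-minute window.
    counts = (events
        .selectExpr("CAST(value AS STRING) AS value", "timestamp")
        .groupBy(window(col("timestamp"), "1 minute"))
        .count())

    # Write the running counts to the console sink (a stand-in for a live dashboard).
    query = counts.writeStream.outputMode("complete").format("console").start()
    query.awaitTermination()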

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at data engineers, data scientists, and programmers who wish to use Spark Streaming features in processing and analyzing real-time data.

By the end of this training, participants will be able to use Spark Streaming to process live data streams for use in databases, filesystems, and live dashboards.

[outline] =>

Introduction

Overview of Spark Streaming Features and Architecture

Preparing the Environment

Processing Messages

Performing a Windowed Stream Processing

Prototyping the Processing Code

Streaming the Code

Acquiring Stream Output

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037710 [source_title] => Spark Streaming with Python and Kafka [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkstreaming ) [ksql] => stdClass Object ( [course_code] => ksql [hr_nid] => 318463 [title] => Confluent KSQL [requirements] =>

Audience

[overview] =>

Confluent KSQL is a stream processing framework built on top of Apache Kafka. It enables real-time data processing using SQL operations.
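
For a feel of what "SQL operations on Kafka" looks like in practice, here is a minimal sketch that submits a KSQL statement to the server's REST endpoint from Python; the host/port, topic name, and stream schema are assumptions for illustration.

    import json
    import requests

    # Recent Confluent releases serve the KSQL REST API on port 8088 by default;
    # adjust host, port, and topic names for a real installation.
    stmt = """
    CREATE STREAM pageviews (user_id VARCHAR, page VARCHAR)
      WITH (KAFKA_TOPIC='pageviews', VALUE_FORMAT='JSON');
    """

    resp = requests.post(
        "http://localhost:8088/ksql",
        headers={"Content-Type": "application/vnd.ksql.v1+json"},
        data=json.dumps({"ksql": stmt, "streamsProperties": {}}),
    )
    print(resp.status_code, resp.json())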

This instructor-led, live training (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to implement Apache Kafka stream processing without writing code.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Setting up Confluent KSQL

Overview of KSQL Features and Architecture

How KSQL Interacts with Apache Kafka

Use Cases for KSQL

KSQL Command Line and Operations

Ingesting Data (CSV, JSON, etc.)

Creating a Stream

Creating a Table

Advanced KSQL Operations (Joins, Windowing, Aggregations, Geospatial, etc.)

Deploying KSQL to Production

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037528 [source_title] => Confluent KSQL [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => ksql ) [apacheignite] => stdClass Object ( [course_code] => apacheignite [hr_nid] => 209621 [title] => Apache Ignite for Developers [requirements] =>

Audience

[overview] =>

Apache Ignite is an in-memory computing platform that sits between the application and data layer to improve speed, scale, and availability.
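
A minimal sketch of that application-to-data-layer interaction using the pyignite thin client (pip install pyignite), assuming a local Ignite node on the default thin-client port 10800; the cache name and values are made up:

    from pyignite import Client

    client = Client()
    client.connect("127.0.0.1", 10800)

    # Caches behave like distributed key-value maps held in memory
    # between the application and the data layer.
    cache = client.get_or_create_cache("quotes")
    cache.put(1, "in-memory value")
    print(cache.get(1))

    client.close()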

This instructor-led, live training (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) is aimed at developers who wish to learn the principles behind persistent and pure in-memory storage as they step through the creation of a sample in-memory computing project.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring Apache Ignite

Overview of Ignite Architecture

Querying Data in Ignite

Spreading Large Data Sets across a Cluster

Understanding the In-Memory Data Grid

Writing a Service in Ignite

Running Distributed Computing with Ignite

Integrating Ignite with RDBMS, NoSQL, Hadoop and Machine Learning Processors

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 14 [status] => published [changed] => 1700037322 [source_title] => Apache Ignite for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apacheignite ) [beam] => stdClass Object ( [course_code] => beam [hr_nid] => 283646 [title] => Unified Batch and Stream Processing with Apache Beam [requirements] =>

Audience

[overview] =>

Apache Beam is an open source, unified programming model for defining and executing parallel data processing pipelines. Its power lies in its ability to run both batch and streaming pipelines, with execution being carried out by one of Beam's supported distributed processing back-ends: Apache Apex, Apache Flink, Apache Spark, and Google Cloud Dataflow. Apache Beam is useful for ETL (Extract, Transform, and Load) tasks such as moving data between different storage media and data sources, transforming data into a more desirable format, and loading data onto a new system.

In this instructor-led, live training (onsite or remote), participants will learn how to implement the Apache Beam SDKs in a Java or Python application that defines a data processing pipeline for decomposing a big data set into smaller chunks for independent, parallel processing.
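
A minimal word-count sketch with the Apache Beam Python SDK, run on the local DirectRunner; the input is toy data, and the same pipeline could in principle be handed to a Flink, Spark, or Dataflow runner via pipeline options:

    import apache_beam as beam

    with beam.Pipeline() as pipeline:
        (pipeline
         | "Create" >> beam.Create(["alpha beta", "beta gamma"])
         | "SplitWords" >> beam.FlatMap(lambda line: line.split())
         | "PairWithOne" >> beam.Map(lambda word: (word, 1))
         | "CountPerWord" >> beam.CombinePerKey(sum)
         | "Print" >> beam.Map(print))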

By the end of this training, participants will be able to:

Format of the Course

Note

[category_overview] => [outline] =>

Introduction

Installing and Configuring Apache Beam

Overview of Apache Beam Features and Architecture

Understanding the Apache Beam Programming Model

Running a sample pipeline

Designing a Pipeline

Creating the Pipeline

Executing the Pipeline

Testing and Debugging Apache Beam

Processing Bounded and Unbounded Datasets

Making Your Pipelines Reusable and Maintainable

Create New Data Sources and Sinks

Integrating Apache Beam with other Big Data Systems

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 14 [status] => published [changed] => 1700037430 [source_title] => Unified Batch and Stream Processing with Apache Beam [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => beam ) [apex] => stdClass Object ( [course_code] => apex [hr_nid] => 209525 [title] => Apache Apex: Processing Big Data-in-Motion [requirements] =>

Audience

[overview] =>

Apache Apex is a YARN-native platform that unifies stream and batch processing. It processes big data-in-motion in a way that is scalable, performant, fault-tolerant, stateful, secure, distributed, and easily operable.

This instructor-led, live training introduces Apache Apex's unified stream processing architecture, and walks participants through the creation of a distributed application using Apex on Hadoop.

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] => [outline] =>

To request a customized course outline for this training, please contact us.

 

[language] => en [duration] => 21 [status] => published [changed] => 1700037320 [source_title] => Apache Apex: Processing Big Data-in-Motion [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => apex ) [storm] => stdClass Object ( [course_code] => storm [hr_nid] => 208253 [title] => Apache Storm [requirements] => [overview] =>

Apache Storm is a distributed, real-time computation engine used for enabling real-time business intelligence. It does so by enabling applications to reliably process unbounded streams of data (a.k.a. stream processing).

"Storm is for real-time processing what Hadoop is for batch processing!"

In this instructor-led, live training, participants will learn how to install and configure Apache Storm, then develop and deploy an Apache Storm application for processing big data in real time.
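
Storm itself runs on the JVM, but its multi-lang protocol allows components in Python. Below is a minimal word-count bolt sketched with the third-party streamparse library; the library choice and class are assumptions for illustration, not the course's prescribed code.

    from collections import Counter
    from streamparse import Bolt

    class WordCountBolt(Bolt):
        """Counts words arriving on an unbounded stream, one tuple at a time."""

        def initialize(self, storm_conf, context):
            self.counts = Counter()

        def process(self, tup):
            word = tup.values[0]
            self.counts[word] += 1
            # Emit the running count downstream; Storm handles distribution and acking.
            self.emit([word, self.counts[word]])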

Some of the topics covered in this training include:

Request this course now!

Audience

Format of the course

[category_overview] => [outline] =>

Request a customized course outline for this training!

[language] => en [duration] => 28 [status] => published [changed] => 1700037303 [source_title] => Apache Storm [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => storm ) [nifi] => stdClass Object ( [course_code] => nifi [hr_nid] => 212800 [title] => Apache NiFi for Administrators [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.
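
Day-to-day administration can also be scripted against NiFi's REST API. A minimal sketch with Python's requests, assuming an unsecured NiFi instance at http://localhost:8080 (secured installs default to HTTPS on port 8443 and require authentication):

    import requests

    base = "http://localhost:8080/nifi-api"

    # Flow status: active thread count, queued flowfiles, etc.
    print(requests.get(f"{base}/flow/status").json())

    # JVM and repository health for the node.
    print(requests.get(f"{base}/system-diagnostics").json())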

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc> (onsite or remote), participants will learn how to deploy and manage Apache NiFi in a live lab environment.

By the end of this training, participants will be able to:

[outline] =>

Introduction to Apache NiFi

Overview of Big Data and Apache Hadoop

Setting up and Running a NiFi Cluster

NiFi Operations

Monitoring and Recovery

Optimizing NiFi

Best practices

Troubleshooting

Summary and Conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037360 [source_title] => Apache NiFi for Administrators [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifi ) [nifidev] => stdClass Object ( [course_code] => nifidev [hr_nid] => 212804 [title] => Apache NiFi for Developers [requirements] =>

Audience

[overview] =>

Apache NiFi (Hortonworks DataFlow) is a real-time integrated data logistics and simple event processing platform that enables the moving, tracking and automation of data between systems. It is written using flow-based programming and provides a web-based user interface to manage dataflows in real time.

In this instructor-led, live training, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.
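
Full custom processors are written in Java, but NiFi's ExecuteScript processor lets you prototype flow logic in Python (Jython). A minimal sketch that tags each incoming flowfile with an attribute; the attribute name is made up, while session and REL_SUCCESS are bindings ExecuteScript provides:

    # Runs inside NiFi's ExecuteScript processor (Jython engine); `session`
    # and `REL_SUCCESS` are injected by the processor, not imported here.
    flowFile = session.get()
    if flowFile is not None:
        # Attach a custom attribute that downstream processors can route on.
        flowFile = session.putAttribute(flowFile, 'processed.by', 'executescript-demo')
        session.transfer(flowFile, REL_SUCCESS)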

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn the fundamentals of flow-based programming as they develop a number of demo extensions, components and processors using Apache NiFi.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Overview of Big Data Tools and Technologies

Installing and Configuring NiFi

Overview of NiFi Architecture

Development Approaches

Design Considerations

Components, Events, and Processor Patterns

Exercise: Streaming Data Feeds into HDFS

Error Handling

Controller Services

Exercise: Ingesting Data from IoT Devices using Web-Based APIs

Exercise: Developing a Custom Apache NiFi Processor using JSON

Testing and Troubleshooting

Contributing to Apache NiFi

Summary and Conclusion

[language] => en [duration] => 7 [status] => published [changed] => 1700037361 [source_title] => Apache NiFi for Developers [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => nifidev ) [flink] => stdClass Object ( [course_code] => flink [hr_nid] => 209489 [title] => Apache Flink Fundamentals [requirements] =>

Audience

[overview] =>

Apache Flink is an open-source framework for scalable stream and batch data processing.

This instructor-led, live training (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.
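
A minimal sketch with the PyFlink DataStream API (pip install apache-flink), using a small in-memory collection in place of a real streaming source:

    from pyflink.datastream import StreamExecutionEnvironment

    env = StreamExecutionEnvironment.get_execution_environment()

    # A bounded collection stands in for a real source such as Kafka.
    ds = env.from_collection(["flink", "handles", "streams", "and", "batches"])

    # Transform and sink: uppercase each element and print it.
    ds.map(lambda s: s.upper()).print()

    env.execute("flink-fundamentals-demo")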

By the end of this training, participants will be able to:

Format of the Course

Course Customization Options

[category_overview] =>

This instructor-led, live training in <loc> (online or onsite) introduces the principles and approaches behind distributed stream and batch data processing, and walks participants through the creation of a real-time, data streaming application in Apache Flink.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Installing and Configuring Apache Flink

Overview of Flink Architecture

Developing Data Streaming Applications in Flink

Managing Diverse Workloads

Performing Advanced Analytics

Setting up a Multi-Node Flink Cluster

Mastering Flink DataStream API

Understanding Flink Libraries

Integrating Flink with Other Big Data Tools

Testing and Troubleshooting

Summary and Next Steps

[language] => en [duration] => 28 [status] => published [changed] => 1700037319 [source_title] => Apache Flink Fundamentals [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => flink ) [sparkpython] => stdClass Object ( [course_code] => sparkpython [hr_nid] => 279430 [title] => Python and Spark for Big Data (PySpark) [requirements] =>

Audience

[overview] =>

Python is a high-level programming language famous for its clear syntax and code readability. Spark is a data processing engine used in querying, analyzing, and transforming big data. PySpark allows users to interface Spark with Python.

In this instructor-led, live training, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.
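
To give a flavor of the hands-on exercises, here is a minimal local PySpark session and DataFrame aggregation; the data is toy input:

    from pyspark.sql import SparkSession
    from pyspark.sql import functions as F

    spark = SparkSession.builder.appName("pyspark-demo").getOrCreate()

    df = spark.createDataFrame(
        [("ops", 10), ("ops", 7), ("dev", 3)],
        ["team", "tickets"],
    )

    # Queries are expressed declaratively and executed lazily across the cluster.
    df.groupBy("team").agg(F.sum("tickets").alias("total")).show()

    spark.stop()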

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn how to use Python and Spark together to analyze big data as they work on hands-on exercises.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Big Data

Overview of Spark

Overview of Python

Overview of PySpark

Setting Up Python with Spark

Setting Up PySpark

Using Amazon Web Services (AWS) EC2 Instances for Spark

Setting Up Databricks

Setting Up the AWS EMR Cluster

Learning the Basics of Python Programming

Learning the Basics of Spark DataFrame

Working on a Spark DataFrame Project Exercise

Understanding Machine Learning with MLlib

Working with MLlib, Spark, and Python for Machine Learning

Understanding Regressions

Understanding Random Forests and Decision Trees

Working with K-means Clustering

Working with Recommender Systems

Implementing Natural Language Processing

Streaming with Spark on Python

Closing Remarks

[language] => en [duration] => 21 [status] => published [changed] => 1715349940 [source_title] => Python and Spark for Big Data (PySpark) [source_language] => en [cert_code] => [weight] => -998 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => sparkpython ) [graphcomputing] => stdClass Object ( [course_code] => graphcomputing [hr_nid] => 278402 [title] => Introduction to Graph Computing [requirements] =>

Audience

[overview] =>

Many real-world problems can be described in terms of graphs. For example, the Web graph, the social network graph, the train network graph and the language graph. These graphs tend to be extremely large; processing them requires a specialized set of tools and processes -- these tools and processes can be referred to as Graph Computing (also known as Graph Analytics).

In this instructor-led, live training, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics and Distributed Graph Processing) approach. We start with a broad overview and then zero in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.
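
As a small taste of the approach, here is a sketch with NetworkX that models "who replied to whom" in a discussion as a directed graph and ranks contributors with PageRank; the names and edges are made up, mirroring the "Ranking Discussion Contributors" case study in the outline below:

    import networkx as nx

    G = nx.DiGraph()
    # An edge u -> v means "u replied to v"; the data is purely illustrative.
    G.add_edges_from([
        ("ana", "bob"), ("carl", "bob"), ("dina", "bob"), ("bob", "ana"),
    ])

    scores = nx.pagerank(G)
    for user, score in sorted(scores.items(), key=lambda kv: -kv[1]):
        print(f"{user}: {score:.3f}")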

By the end of this training, participants will be able to:

Format of the course

[category_overview] =>

In this instructor-led, live training in <loc>, participants will learn about the technology offerings and implementation approaches for processing graph data. The aim is to identify real-world objects, their characteristics and relationships, then model these relationships and process them as data using a Graph Computing (also known as Graph Analytics) approach. We start with a broad overview and then zero in on specific tools as we step through a series of case studies, hands-on exercises and live deployments.

By the end of this training, participants will be able to:

[outline] =>

Introduction

Understanding Graph Data

Using Graph Databases to Model, Persist and Process Graph Data

Exercise: Modeling Graph Data with neo4j

Beyond Graph Databases: Graph Computing

Solving Real-World Problems with Traversals

Case Study: Ranking Discussion Contributors

Graph Computing: Local, In-Memory Graph toolkits

Exercise: Modeling Graph Data with NetworkX

Graph Computing: Batch Processing Graph Frameworks

Graph Computing: Graph-Parallel Computation

Setup and Installation

GraphX Operators

Iterating with Pregel API

Building a Graph

Designing Scalable Algorithms

Accessing Additional Algorithms

Exercise: PageRank and Top Users

Deploying to Production

Closing Remarks

[language] => en [duration] => 28 [status] => published [changed] => 1715349940 [source_title] => Introduction to Graph Computing [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => graphcomputing ) [aitech] => stdClass Object ( [course_code] => aitech [hr_nid] => 199320 [title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [requirements] => [overview] =>

This course is aimed at developers and data scientists who wish to understand and implement AI within their applications. Special focus is given to Data Analysis, Distributed AI and NLP.

[category_overview] => [outline] =>
  1. Distributed big data
    1. Data mining methods (training single systems + distributed prediction: traditional machine learning algorithms + MapReduce distributed prediction)
    2. Apache Spark MLlib
  2. Recommendations and advertising:
    1. Natural language
    2. Text clustering, text categorization (labeling), synonyms
    3. User profile reconstruction, labeling systems
    4. Recommendation algorithms
    5. Ensuring the accuracy of "lift" between and within categories
    6. How to create closed loops for recommendation algorithms
  3. Logistic regression, RankingSVM
  4. Feature recognition (deep learning and automatic feature recognition for graphics)
  5. Natural language
    1. Chinese word segmentation
    2. Topic models (text clustering)
    3. Text classification
    4. Keyword extraction
    5. Semantic analysis, semantic parsing, word2vec (word to vector; see the sketch after this outline)
    6. RNN long short-term memory (LSTM) architecture
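
A tiny illustration of the word2vec item above, using the gensim library; the library choice is an assumption for illustration, and the corpus is toy data:

    from gensim.models import Word2Vec

    sentences = [
        ["big", "data", "analytics"],
        ["distributed", "data", "processing"],
        ["natural", "language", "processing"],
    ]

    # vector_size, window and min_count follow gensim 4.x parameter names.
    model = Word2Vec(sentences, vector_size=32, window=2, min_count=1, epochs=50)

    # Nearest neighbors of "data" in the learned vector space.
    print(model.wv.most_similar("data", topn=2))
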
[language] => en [duration] => 21 [status] => published [changed] => 1715084120 [source_title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [source_language] => zh-hans [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => aitech ) [spmllib] => stdClass Object ( [course_code] => spmllib [hr_nid] => 141237 [title] => Apache Spark MLlib [requirements] =>

Knowledge of one of the following:

[overview] =>

MLlib is Spark’s machine learning (ML) library. Its goal is to make practical machine learning scalable and easy. It consists of common learning algorithms and utilities, including classification, regression, clustering, collaborative filtering, dimensionality reduction, as well as lower-level optimization primitives and higher-level pipeline APIs.

It is divided into two packages:

 

Audience

This course is directed at engineers and developers seeking to utilize a built-in machine learning library for Apache Spark.

[category_overview] => [outline] =>

spark.mllib: data types, algorithms, and utilities

spark.ml: high-level APIs for ML pipelines
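
A minimal sketch of the spark.ml side: a Pipeline chaining a tokenizer, a hashing TF featurizer, and logistic regression, fit on toy training data (it mirrors the standard Spark documentation pattern):

    from pyspark.sql import SparkSession
    from pyspark.ml import Pipeline
    from pyspark.ml.classification import LogisticRegression
    from pyspark.ml.feature import HashingTF, Tokenizer

    spark = SparkSession.builder.appName("mllib-demo").getOrCreate()

    train = spark.createDataFrame(
        [("spark is great", 1.0), ("boring slides", 0.0)],
        ["text", "label"],
    )

    pipeline = Pipeline(stages=[
        Tokenizer(inputCol="text", outputCol="words"),
        HashingTF(inputCol="words", outputCol="features"),
        LogisticRegression(maxIter=10),
    ])

    model = pipeline.fit(train)
    model.transform(train).select("text", "prediction").show()

    spark.stop()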

[language] => en [duration] => 35 [status] => published [changed] => 1700037209 [source_title] => Apache Spark MLlib [source_language] => en [cert_code] => [weight] => 0 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => spmllib ) [kdd] => stdClass Object ( [course_code] => kdd [hr_nid] => 200632 [title] => Knowledge Discovery in Databases (KDD) [requirements] => [overview] =>

Knowledge discovery in databases (KDD) is the process of discovering useful knowledge from a collection of data. Real-life applications for this data mining technique include marketing, fraud detection, telecommunications, and manufacturing.

In this instructor-led, live course, we introduce the processes involved in KDD and carry out a series of exercises to practice the implementation of those processes.
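
As a small sketch of the "cleaning/preprocessing" and "reduction/projection" steps of the KDD process, here is an example with scikit-learn; the toolkit choice and data are illustrative assumptions, as the course does not mandate a specific library:

    import numpy as np
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler

    # A tiny, made-up target data set with two numeric features.
    X = np.array([[1.0, 200.0], [2.0, 180.0], [3.0, 260.0], [4.0, 240.0]])

    X_scaled = StandardScaler().fit_transform(X)              # cleaning/normalization
    X_reduced = PCA(n_components=1).fit_transform(X_scaled)   # reduction/projection
    print(X_reduced.ravel())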

Audience

Format of the Course

[category_overview] => [outline] =>

Introduction

Establishing the application domain

Establishing relevant prior knowledge

Understanding the goal of the investigation

Creating a target data set

Data cleaning and preprocessing

Data reduction and projection

Choosing the data mining task

Choosing the data mining algorithms

Interpreting the mined patterns

Summary and conclusion

[language] => en [duration] => 21 [status] => published [changed] => 1700037259 [source_title] => Knowledge Discovery in Databases (KDD) [source_language] => en [cert_code] => [weight] => -987 [excluded_sites] => [use_mt] => stdClass Object ( [field_overview] => [field_course_outline] => [field_prerequisits] => [field_overview_in_category] => ) [cc] => kdd ) ) [codes] => Array ( [0] => datavault [1] => sparkstreaming [2] => ksql [3] => apacheignite [4] => beam [5] => apex [6] => storm [7] => nifi [8] => nifidev [9] => flink [10] => sparkpython [11] => graphcomputing [12] => aitech [13] => spmllib [14] => kdd ) ) [4] => Array ( [regions] => Array ( [ec_4966] => Array ( [tid] => ec_4966 [title] => Guayaquil [sales_area] => ec_ecuador [venues] => Array ( [ec_15661446] => Array ( [vid] => ec_15661446 [title] => Guayaquil - Mall del Sol [vfdc] => 175.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5787 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5787 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6844 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3422 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7902 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 937 [classroom guaranteed per delegate] => 2634 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8960 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2240 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 10015 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 2003 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11070 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1845 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12131 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1733 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13184 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1648 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14247 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1583 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15300 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1530 ) ) ) ) ) [ec_4967] => Array ( [tid] => ec_4967 [title] => Quito [sales_area] => ec_ecuador [venues] => Array ( [ec_15661447] => Array ( [vid] => ec_15661447 [title] => Quito - Av Eloy Alfaro [vfdc] => 200.00 [prices] => Array ( [1] => Array ( [remote guaranteed] => 5437 [classroom guaranteed] => 5837 [remote guaranteed per delegate] => 5437 [delegates] => 1 [adp] => 937 [classroom guaranteed per delegate] => 5837 ) [2] => Array ( [remote guaranteed] => 6374 [classroom guaranteed] => 6874 [remote guaranteed per delegate] => 3187 [delegates] => 2 [adp] => 937 [classroom guaranteed per delegate] => 3437 ) [3] => Array ( [remote guaranteed] => 7311 [classroom guaranteed] => 7911 [remote guaranteed per delegate] => 2437 [delegates] => 3 [adp] => 
937 [classroom guaranteed per delegate] => 2637 ) [4] => Array ( [remote guaranteed] => 8248 [classroom guaranteed] => 8948 [remote guaranteed per delegate] => 2062 [delegates] => 4 [adp] => 937 [classroom guaranteed per delegate] => 2237 ) [5] => Array ( [remote guaranteed] => 9185 [classroom guaranteed] => 9985 [remote guaranteed per delegate] => 1837 [delegates] => 5 [adp] => 937 [classroom guaranteed per delegate] => 1997 ) [6] => Array ( [remote guaranteed] => 10122 [classroom guaranteed] => 11022 [remote guaranteed per delegate] => 1687 [delegates] => 6 [adp] => 937 [classroom guaranteed per delegate] => 1837 ) [7] => Array ( [remote guaranteed] => 11060 [classroom guaranteed] => 12061 [remote guaranteed per delegate] => 1580 [delegates] => 7 [adp] => 937 [classroom guaranteed per delegate] => 1723 ) [8] => Array ( [remote guaranteed] => 12000 [classroom guaranteed] => 13096 [remote guaranteed per delegate] => 1500 [delegates] => 8 [adp] => 937 [classroom guaranteed per delegate] => 1637 ) [9] => Array ( [remote guaranteed] => 12933 [classroom guaranteed] => 14130 [remote guaranteed per delegate] => 1437 [delegates] => 9 [adp] => 937 [classroom guaranteed per delegate] => 1570 ) [10] => Array ( [remote guaranteed] => 13870 [classroom guaranteed] => 15170 [remote guaranteed per delegate] => 1387 [delegates] => 10 [adp] => 937 [classroom guaranteed per delegate] => 1517 ) ) ) ) ) ) [remote] => Array ( [1] => Array ( [remote guaranteed] => 5437 [remote guaranteed per delegate] => 5437 [adp] => 937 ) [2] => Array ( [remote guaranteed] => 6374 [remote guaranteed per delegate] => 3187 [adp] => 937 ) [3] => Array ( [remote guaranteed] => 7311 [remote guaranteed per delegate] => 2437 [adp] => 937 ) [4] => Array ( [remote guaranteed] => 8248 [remote guaranteed per delegate] => 2062 [adp] => 937 ) [5] => Array ( [remote guaranteed] => 9185 [remote guaranteed per delegate] => 1837 [adp] => 937 ) [6] => Array ( [remote guaranteed] => 10122 [remote guaranteed per delegate] => 1687 [adp] => 937 ) [7] => Array ( [remote guaranteed] => 11060 [remote guaranteed per delegate] => 1580 [adp] => 937 ) [8] => Array ( [remote guaranteed] => 12000 [remote guaranteed per delegate] => 1500 [adp] => 937 ) [9] => Array ( [remote guaranteed] => 12933 [remote guaranteed per delegate] => 1437 [adp] => 937 ) [10] => Array ( [remote guaranteed] => 13870 [remote guaranteed per delegate] => 1387 [adp] => 937 ) ) [currency] => USD ) [5] => Array ( [0] => 5 [1] => 5 [2] => 4 [3] => 4 [4] => 5 ) [6] => Array ( [479923] => Array ( [title] => Apache NiFi for Developers [rating] => 5 [delegate_and_company] => Pedro [body] => I liked the virtual machine environments because he could easily toggle between the views and help if we were struggling with the material. [mc] => [is_mt] => 0 [nid] => 479923 ) [445523] => Array ( [title] => Python and Spark for Big Data (PySpark) [rating] => 5 [delegate_and_company] => Aurelia-Adriana - Allianz Services Romania [body] => I liked that it was practical. Loved to apply the theoretical knowledge with practical examples. [mc] => [is_mt] => 0 [nid] => 445523 ) [422075] => Array ( [title] => Apache NiFi for Administrators [rating] => 4 [delegate_and_company] => Rolando García - OIT para México y Cuba [body] => Very little; it was very difficult for me, mostly because I joined out of sync and did not attend the first sessions.
[mc] => [is_mt] => 0 [nid] => 422075 ) [404743] => Array ( [title] => Data Vault: Building a Scalable Data Warehouse [rating] => 4 [delegate_and_company] => john ernesto ii fernandez - Philippine AXA Life Insurance Corporation [body] => how the trainer shows his knowledge in the subject he's teaching [mc] => [is_mt] => 0 [nid] => 404743 ) [283902] => Array ( [title] => Artificial Intelligence - the most applied stuff - Data Analysis + Distributed AI + NLP [rating] => 5 [delegate_and_company] => Laura Kahn [body] => This is one of the best hands-on with exercises programming courses I have ever taken. [mc] => This is one of the best hands-on with exercises programming courses I have ever taken. [is_mt] => 0 [nid] => 283902 ) ) [7] => 4.6 [8] => 1 [9] => 1 [10] => ) ) [6] => Array ( [file] => /apps/nobleprog-website/core/routes.php [line] => 19 [function] => course_menu_callback [args] => Array ( [0] => /en/cc/bdatr ) ) [7] => Array ( [file] => /apps/nobleprog-website/__index.php [line] => 100 [args] => Array ( [0] => /apps/nobleprog-website/core/routes.php ) [function] => require_once ) [8] => Array ( [file] => /apps/nobleprog-website/_index.php [line] => 26 [args] => Array ( [0] => /apps/nobleprog-website/__index.php ) [function] => include_once ) [9] => Array ( [file] => /apps/hitra7/index.php [line] => 54 [args] => Array ( [0] => /apps/nobleprog-website/_index.php ) [function] => include_once ) )