diff --git a/README.md b/README.md index 12f3b9ac..c295aedc 100755 --- a/README.md +++ b/README.md @@ -33,21 +33,32 @@ See CONTRIBUTING.md. ## Changelog -### v0.6 - Sprint 2013 (tbc) -[v0.6 milestone](https://github.com/okfn/recline/issues?milestone=5) +### v0.7 - Summer 2014 (tbc) + +[v0.7 milestone](https://github.com/okfn/recline/issues?milestone=7) Possible breaking changes + * Support for row/add/delete/Reorder for recline slickGrid check `_includes/recline-deps.html` for slcikGrid plugins required #396 +* Upgraded timelinejs lib - #316 +* Removed csv backend (as now in separate repo) #444 + +### v0.6 - Summer 2013 + +[v0.6 milestone](https://github.com/okfn/recline/issues?milestone=5) (more than 40 issues) + +Possible breaking changes + * Many backends moved to their own repositories #314 +* Upgrade to Backbone v1.0 #351 * Updated Leaflet to latest version 0.4.4 #220 -* Added marker clustering in map view to handle a large number of markers +* Added marker clustering in map view to handle a large number of markers (and allowed it to be disabled) * Dataset.restore method removed (not used internally except from Multiview.restore) * Views no longer call render in initialize but must be called client code * Backend.Memory.Store attribute for holding 'records' renamed to `records` from `data` -* Require new underscore.deferred vendor library for all use (jQuery no longer required if just using recline.dataset.js) +* Option to use underscore.deferred vendor library and not use jQuery (jQuery no longer required if just using recline.dataset.js) * View.el is now the raw DOM element. If you want a jQuery-wrapped version, use view.$el. 
#350 -* Upgraded timelinejs lib - #316 * Pager widget now takes Dataset object rather than QueryState object #386 ### v0.5 - July 5th 2012 (first public release) diff --git a/_includes/backend-list.html b/_includes/backend-list.html index 315f856f..af33cb01 100644 --- a/_includes/backend-list.html +++ b/_includes/backend-list.html @@ -1,10 +1,8 @@ -
- // {
- // records: [ [...], [...], ... ],
- // metadata: { may be some metadata e.g. file name }
- // useMemoryStore: true
- // }
- //
- my.fetch = function(dataset) {
- var dfd = new Deferred();
- if (dataset.file) {
- var reader = new FileReader();
- var encoding = dataset.encoding || 'UTF-8';
- reader.onload = function(e) {
- var out = my.extractFields(my.parseCSV(e.target.result, dataset), dataset);
- out.useMemoryStore = true;
- out.metadata = {
- filename: dataset.file.name
- }
- dfd.resolve(out);
- };
- reader.onerror = function (e) {
- alert('Failed to load file. Code: ' + e.target.error.code);
- };
- reader.readAsText(dataset.file, encoding);
- } else if (dataset.data) {
- var out = my.extractFields(my.parseCSV(dataset.data, dataset), dataset);
- out.useMemoryStore = true;
- dfd.resolve(out);
- } else if (dataset.url) {
- jQuery.get(dataset.url).done(function(data) {
- var out = my.extractFields(my.parseCSV(data, dataset), dataset);
- out.useMemoryStore = true;
- dfd.resolve(out);
- });
- }
- return dfd.promise();
- };
-
- // Convert array of rows in { records: [ ...] , fields: [ ... ] }
- // @param {Boolean} noHeaderRow If true assume that first row is not a header (i.e. list of fields but is data.
- my.extractFields = function(rows, noFields) {
- if (noFields.noHeaderRow !== true && rows.length > 0) {
- return {
- fields: rows[0],
- records: rows.slice(1)
- }
- } else {
- return {
- records: rows
- }
- }
- };
-
- // ## parseCSV
- //
- // Converts a Comma Separated Values string into an array of arrays.
- // Each line in the CSV becomes an array.
- //
- // Empty fields are converted to nulls and non-quoted numbers are converted to integers or floats.
- //
- // @return The CSV parsed as an array
- // @type Array
- //
- // @param {String} s The string to convert
- // @param {Object} options Options for loading CSV including
- // @param {Boolean} [trim=false] If set to True leading and trailing
- // whitespace is stripped off of each non-quoted field as it is imported
- // @param {String} [delimiter=','] A one-character string used to separate
- // fields. It defaults to ','
- // @param {String} [quotechar='"'] A one-character string used to quote
- // fields containing special characters, such as the delimiter or
- // quotechar, or which contain new-line characters. It defaults to '"'
- //
- // @param {Integer} skipInitialRows A integer number of rows to skip (default 0)
- //
- // Heavily based on uselesscode's JS CSV parser (MIT Licensed):
- // http://www.uselesscode.org/javascript/csv/
- my.parseCSV= function(s, options) {
- // Get rid of any trailing \n
- s = chomp(s);
-
- var options = options || {};
- var trm = (options.trim === false) ? false : true;
- var delimiter = options.delimiter || ',';
- var quotechar = options.quotechar || '"';
-
- var cur = '', // The character we are currently processing.
- inQuote = false,
- fieldQuoted = false,
- field = '', // Buffer for building up the current field
- row = [],
- out = [],
- i,
- processField;
-
- processField = function (field) {
- if (fieldQuoted !== true) {
- // If field is empty set to null
- if (field === '') {
- field = null;
- // If the field was not quoted and we are trimming fields, trim it
- } else if (trm === true) {
- field = trim(field);
- }
-
- // Convert unquoted numbers to their appropriate types
- if (rxIsInt.test(field)) {
- field = parseInt(field, 10);
- } else if (rxIsFloat.test(field)) {
- field = parseFloat(field, 10);
- }
- }
- return field;
- };
-
- for (i = 0; i < s.length; i += 1) {
- cur = s.charAt(i);
-
- // If we are at a EOF or EOR
- if (inQuote === false && (cur === delimiter || cur === "\n")) {
- field = processField(field);
- // Add the current field to the current row
- row.push(field);
- // If this is EOR append row to output and flush row
- if (cur === "\n") {
- out.push(row);
- row = [];
- }
- // Flush the field buffer
- field = '';
- fieldQuoted = false;
- } else {
- // If it's not a quotechar, add it to the field buffer
- if (cur !== quotechar) {
- field += cur;
- } else {
- if (!inQuote) {
- // We are not in a quote, start a quote
- inQuote = true;
- fieldQuoted = true;
- } else {
- // Next char is quotechar, this is an escaped quotechar
- if (s.charAt(i + 1) === quotechar) {
- field += quotechar;
- // Skip the next char
- i += 1;
- } else {
- // It's not escaping, so end quote
- inQuote = false;
- }
- }
- }
- }
- }
-
- // Add the last field
- field = processField(field);
- row.push(field);
- out.push(row);
-
- // Expose the ability to discard initial rows
- if (options.skipInitialRows) out = out.slice(options.skipInitialRows);
-
- return out;
- };
-
- // ## serializeCSV
- //
- // Convert an Object or a simple array of arrays into a Comma
- // Separated Values string.
- //
- // Nulls are converted to empty fields and integers or floats are converted to non-quoted numbers.
- //
- // @return The array serialized as a CSV
- // @type String
- //
- // @param {Object or Array} dataToSerialize The Object or array of arrays to convert. Object structure must be as follows:
- //
- // {
- // fields: [ {id: .., ...}, {id: ...,
- // records: [ { record }, { record }, ... ]
- // ... // more attributes we do not care about
- // }
- //
- // @param {object} options Options for serializing the CSV file including
- // delimiter and quotechar (see parseCSV options parameter above for
- // details on these).
- //
- // Heavily based on uselesscode's JS CSV serializer (MIT Licensed):
- // http://www.uselesscode.org/javascript/csv/
- my.serializeCSV= function(dataToSerialize, options) {
- var a = null;
- if (dataToSerialize instanceof Array) {
- a = dataToSerialize;
- } else {
- a = [];
- var fieldNames = _.pluck(dataToSerialize.fields, 'id');
- a.push(fieldNames);
- _.each(dataToSerialize.records, function(record, index) {
- var tmp = _.map(fieldNames, function(fn) {
- return record[fn];
- });
- a.push(tmp);
- });
- }
- var options = options || {};
- var delimiter = options.delimiter || ',';
- var quotechar = options.quotechar || '"';
-
- var cur = '', // The character we are currently processing.
- field = '', // Buffer for building up the current field
- row = '',
- out = '',
- i,
- j,
- processField;
-
- processField = function (field) {
- if (field === null) {
- // If field is null set to empty string
- field = '';
- } else if (typeof field === "string" && rxNeedsQuoting.test(field)) {
- // Convert string to delimited string
- field = quotechar + field + quotechar;
- } else if (typeof field === "number") {
- // Convert number to string
- field = field.toString(10);
- }
-
- return field;
- };
-
- for (i = 0; i < a.length; i += 1) {
- cur = a[i];
-
- for (j = 0; j < cur.length; j += 1) {
- field = processField(cur[j]);
- // If this is EOR append row to output and flush row
- if (j === (cur.length - 1)) {
- row += field;
- out += row + "\n";
- row = '';
- } else {
- // Add the current field to the current row
- row += field + delimiter;
- }
- // Flush the field buffer
- field = '';
- }
- }
-
- return out;
- };
-
- var rxIsInt = /^\d+$/,
- rxIsFloat = /^\d*\.\d+$|^\d+\.\d*$/,
- // If a string has leading or trailing space,
- // contains a comma double quote or a newline
- // it needs to be quoted in CSV output
- rxNeedsQuoting = /^\s|\s$|,|"|\n/,
- trim = (function () {
- // Fx 3.1 has a native trim function, it's about 10x faster, use it if it exists
- if (String.prototype.trim) {
- return function (s) {
- return s.trim();
- };
- } else {
- return function (s) {
- return s.replace(/^\s*/, '').replace(/\s*$/, '');
- };
- }
- }());
-
- function chomp(s) {
- if (s.charAt(s.length - 1) !== "\n") {
- // Does not end with \n, just return string
- return s;
- } else {
- // Remove the \n
- return s.substring(0, s.length - 1);
- }
- }
-
-
-}(this.recline.Backend.CSV));
-this.recline = this.recline || {};
-this.recline.Backend = this.recline.Backend || {};
this.recline.Backend.DataProxy = this.recline.Backend.DataProxy || {};
(function(my) {
@@ -2813,7 +2506,7 @@ this.recline.View = this.recline.View || {};
// Manage multiple views together along with query editor etc. Usage:
//
//
-// var myExplorer = new model.recline.MultiView({
+// var myExplorer = new recline.View.MultiView({
// model: {{recline.Model.Dataset instance}}
// el: {{an existing dom element}}
// views: {{dataset views}}
@@ -2863,7 +2556,7 @@ this.recline.View = this.recline.View || {};
// {
// id: 'filterEditor', // used for routing
// label: 'Filters', // used for view switcher
-// view: new recline.View.FielterEditor({
+// view: new recline.View.FilterEditor({
// model: dataset
// })
// },
@@ -2881,10 +2574,10 @@ this.recline.View = this.recline.View || {};
// special as it includes config of many of the subviews.
//
//
-// state = {
+// var state = {
// query: {dataset query state - see dataset.queryState object}
-// view-{id1}: {view-state for this view}
-// view-{id2}: {view-state for }
+// 'view-{id1}': {view-state for this view}
+// 'view-{id2}': {view-state for }
// ...
// // Explorer
// currentView: id of current view (defaults to first view if not specified)
diff --git a/docs/backends.markdown b/docs/backends.markdown
index 527f9014..09adb183 100644
--- a/docs/backends.markdown
+++ b/docs/backends.markdown
@@ -20,17 +20,25 @@ on the backend.
Backends come in 2 flavours:
-1. Loader backends - only implement fetch method. The data is then cached in a Memory.Store on the Dataset and interacted with there. This is best for sources which just allow you to load data or where you want to load the data once and work with it locally.
-2. Store backends - these support fetch, query and, if write-enabled, save. These are suitable where the backend contains a lot of data (infeasible to load locally - for examples a million rows) or where the backend has capabilities you want to take advantage of.
+* Loader backends - only implement fetch method. The data is then cached in a
+ Memory.Store on the Dataset and interacted with there. This is best for
+ sources which just allow you to load data or where you want to load the data
+ once and work with it locally.
+* Store backends - these support fetch, query and, if write-enabled, save.
+ These are suitable where the backend contains a lot of data (infeasible to
+ load locally - for example, a million rows) or where the backend has
+ capabilities you want to take advantage of.
-# List of Backends Shipped with Recline
+Examples of the 2 types of backends are provided by the Google docs backend (a
+"Loader" backend) and the ElasticSearch backend (a Store backend).
-{% include backend-list.html %}
+# Available Backends
-NB: examples of the 2 types of backends are provided by the Google docs backend (a "Loader" backend) and the ElasticSearch backend (a Store backend).
-
-It's easy to write your own backend - you just need to implement the API as described below.
+You can find a list of the available Backends along with examples of how to use
+them in the [Backends Tutorial](tutorial-backends.html).
+Note that it's easy to write your own backend - you just need to implement the
+Recline Backend API described below.
# Backend API
diff --git a/docs/index.html b/docs/index.html
index 94b500d5..c30d8c3f 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -52,10 +52,6 @@ root: ../
Note: often you are loading data from a given source in -order to load it into a Recline Dataset and display it in a View. However, you -can also happily use a Backend to load data on its own without using any other -part of the Recline library as all the Backends are designed to have no -dependency on other parts of Recline.
-What Backends are available from Recline? -{% include backend-list.html %} -
-Backend you'd like to see not available? It's easy to write your own – see the Backend reference docs for details of the required API. -
+## What Backends are available from Recline? + +{% include backend-list.html %} + +**Backend you'd like to see not available?** It's easy to write your own +– see the Backend reference docs for details +of the required API. ## Preparing your app @@ -76,8 +77,9 @@ much more limited if you are just using a Backend. Specifically: + - + @@ -91,13 +93,6 @@ Doc](https://docs.google.com/spreadsheet/ccc?key=0Aon3JiuouxLUdGZPaUZsMjBxeGhfOW For Recline to be able to access a Google Spreadsheet it **must** have been 'Published to the Web' (enabled via File -> Publish to the Web menu). -
- // {
- // records: [ [...], [...], ... ],
- // metadata: { may be some metadata e.g. file name }
- // useMemoryStore: true
- // }
- //
- my.fetch = function(dataset) {
- var dfd = new Deferred();
- if (dataset.file) {
- var reader = new FileReader();
- var encoding = dataset.encoding || 'UTF-8';
- reader.onload = function(e) {
- var out = my.extractFields(my.parseCSV(e.target.result, dataset), dataset);
- out.useMemoryStore = true;
- out.metadata = {
- filename: dataset.file.name
- }
- dfd.resolve(out);
- };
- reader.onerror = function (e) {
- alert('Failed to load file. Code: ' + e.target.error.code);
- };
- reader.readAsText(dataset.file, encoding);
- } else if (dataset.data) {
- var out = my.extractFields(my.parseCSV(dataset.data, dataset), dataset);
- out.useMemoryStore = true;
- dfd.resolve(out);
- } else if (dataset.url) {
- jQuery.get(dataset.url).done(function(data) {
- var out = my.extractFields(my.parseCSV(data, dataset), dataset);
- out.useMemoryStore = true;
- dfd.resolve(out);
- });
- }
- return dfd.promise();
- };
-
- // Convert array of rows in { records: [ ...] , fields: [ ... ] }
- // @param {Boolean} noHeaderRow If true assume that first row is not a header (i.e. list of fields but is data.
- my.extractFields = function(rows, noFields) {
- if (noFields.noHeaderRow !== true && rows.length > 0) {
- return {
- fields: rows[0],
- records: rows.slice(1)
- }
- } else {
- return {
- records: rows
- }
- }
- };
-
- // ## parseCSV
- //
- // Converts a Comma Separated Values string into an array of arrays.
- // Each line in the CSV becomes an array.
- //
- // Empty fields are converted to nulls and non-quoted numbers are converted to integers or floats.
- //
- // @return The CSV parsed as an array
- // @type Array
- //
- // @param {String} s The string to convert
- // @param {Object} options Options for loading CSV including
- // @param {Boolean} [trim=false] If set to True leading and trailing
- // whitespace is stripped off of each non-quoted field as it is imported
- // @param {String} [delimiter=','] A one-character string used to separate
- // fields. It defaults to ','
- // @param {String} [quotechar='"'] A one-character string used to quote
- // fields containing special characters, such as the delimiter or
- // quotechar, or which contain new-line characters. It defaults to '"'
- //
- // @param {Integer} skipInitialRows A integer number of rows to skip (default 0)
- //
- // Heavily based on uselesscode's JS CSV parser (MIT Licensed):
- // http://www.uselesscode.org/javascript/csv/
- my.parseCSV= function(s, options) {
- // Get rid of any trailing \n
- s = chomp(s);
-
- var options = options || {};
- var trm = (options.trim === false) ? false : true;
- var delimiter = options.delimiter || ',';
- var quotechar = options.quotechar || '"';
-
- var cur = '', // The character we are currently processing.
- inQuote = false,
- fieldQuoted = false,
- field = '', // Buffer for building up the current field
- row = [],
- out = [],
- i,
- processField;
-
- processField = function (field) {
- if (fieldQuoted !== true) {
- // If field is empty set to null
- if (field === '') {
- field = null;
- // If the field was not quoted and we are trimming fields, trim it
- } else if (trm === true) {
- field = trim(field);
- }
-
- // Convert unquoted numbers to their appropriate types
- if (rxIsInt.test(field)) {
- field = parseInt(field, 10);
- } else if (rxIsFloat.test(field)) {
- field = parseFloat(field, 10);
- }
- }
- return field;
- };
-
- for (i = 0; i < s.length; i += 1) {
- cur = s.charAt(i);
-
- // If we are at a EOF or EOR
- if (inQuote === false && (cur === delimiter || cur === "\n")) {
- field = processField(field);
- // Add the current field to the current row
- row.push(field);
- // If this is EOR append row to output and flush row
- if (cur === "\n") {
- out.push(row);
- row = [];
- }
- // Flush the field buffer
- field = '';
- fieldQuoted = false;
- } else {
- // If it's not a quotechar, add it to the field buffer
- if (cur !== quotechar) {
- field += cur;
- } else {
- if (!inQuote) {
- // We are not in a quote, start a quote
- inQuote = true;
- fieldQuoted = true;
- } else {
- // Next char is quotechar, this is an escaped quotechar
- if (s.charAt(i + 1) === quotechar) {
- field += quotechar;
- // Skip the next char
- i += 1;
- } else {
- // It's not escaping, so end quote
- inQuote = false;
- }
- }
- }
- }
- }
-
- // Add the last field
- field = processField(field);
- row.push(field);
- out.push(row);
-
- // Expose the ability to discard initial rows
- if (options.skipInitialRows) out = out.slice(options.skipInitialRows);
-
- return out;
- };
-
- // ## serializeCSV
- //
- // Convert an Object or a simple array of arrays into a Comma
- // Separated Values string.
- //
- // Nulls are converted to empty fields and integers or floats are converted to non-quoted numbers.
- //
- // @return The array serialized as a CSV
- // @type String
- //
- // @param {Object or Array} dataToSerialize The Object or array of arrays to convert. Object structure must be as follows:
- //
- // {
- // fields: [ {id: .., ...}, {id: ...,
- // records: [ { record }, { record }, ... ]
- // ... // more attributes we do not care about
- // }
- //
- // @param {object} options Options for serializing the CSV file including
- // delimiter and quotechar (see parseCSV options parameter above for
- // details on these).
- //
- // Heavily based on uselesscode's JS CSV serializer (MIT Licensed):
- // http://www.uselesscode.org/javascript/csv/
- my.serializeCSV= function(dataToSerialize, options) {
- var a = null;
- if (dataToSerialize instanceof Array) {
- a = dataToSerialize;
- } else {
- a = [];
- var fieldNames = _.pluck(dataToSerialize.fields, 'id');
- a.push(fieldNames);
- _.each(dataToSerialize.records, function(record, index) {
- var tmp = _.map(fieldNames, function(fn) {
- return record[fn];
- });
- a.push(tmp);
- });
- }
- var options = options || {};
- var delimiter = options.delimiter || ',';
- var quotechar = options.quotechar || '"';
-
- var cur = '', // The character we are currently processing.
- field = '', // Buffer for building up the current field
- row = '',
- out = '',
- i,
- j,
- processField;
-
- processField = function (field) {
- if (field === null) {
- // If field is null set to empty string
- field = '';
- } else if (typeof field === "string" && rxNeedsQuoting.test(field)) {
- // Convert string to delimited string
- field = quotechar + field + quotechar;
- } else if (typeof field === "number") {
- // Convert number to string
- field = field.toString(10);
- }
-
- return field;
- };
-
- for (i = 0; i < a.length; i += 1) {
- cur = a[i];
-
- for (j = 0; j < cur.length; j += 1) {
- field = processField(cur[j]);
- // If this is EOR append row to output and flush row
- if (j === (cur.length - 1)) {
- row += field;
- out += row + "\n";
- row = '';
- } else {
- // Add the current field to the current row
- row += field + delimiter;
- }
- // Flush the field buffer
- field = '';
- }
- }
-
- return out;
- };
-
- var rxIsInt = /^\d+$/,
- rxIsFloat = /^\d*\.\d+$|^\d+\.\d*$/,
- // If a string has leading or trailing space,
- // contains a comma double quote or a newline
- // it needs to be quoted in CSV output
- rxNeedsQuoting = /^\s|\s$|,|"|\n/,
- trim = (function () {
- // Fx 3.1 has a native trim function, it's about 10x faster, use it if it exists
- if (String.prototype.trim) {
- return function (s) {
- return s.trim();
- };
- } else {
- return function (s) {
- return s.replace(/^\s*/, '').replace(/\s*$/, '');
- };
- }
- }());
-
- function chomp(s) {
- if (s.charAt(s.length - 1) !== "\n") {
- // Does not end with \n, just return string
- return s;
- } else {
- // Remove the \n
- return s.substring(0, s.length - 1);
- }
- }
-
-
-}(this.recline.Backend.CSV));
diff --git a/src/view.multiview.js b/src/view.multiview.js
index 6d7e754a..b0e2480f 100644
--- a/src/view.multiview.js
+++ b/src/view.multiview.js
@@ -11,7 +11,7 @@ this.recline.View = this.recline.View || {};
// Manage multiple views together along with query editor etc. Usage:
//
//
-// var myExplorer = new model.recline.MultiView({
+// var myExplorer = new recline.View.MultiView({
// model: {{recline.Model.Dataset instance}}
// el: {{an existing dom element}}
// views: {{dataset views}}
@@ -61,7 +61,7 @@ this.recline.View = this.recline.View || {};
// {
// id: 'filterEditor', // used for routing
// label: 'Filters', // used for view switcher
-// view: new recline.View.FielterEditor({
+// view: new recline.View.FilterEditor({
// model: dataset
// })
// },
@@ -79,10 +79,10 @@ this.recline.View = this.recline.View || {};
// special as it includes config of many of the subviews.
//
//
-// state = {
+// var state = {
// query: {dataset query state - see dataset.queryState object}
-// view-{id1}: {view-state for this view}
-// view-{id2}: {view-state for }
+// 'view-{id1}': {view-state for this view}
+// 'view-{id2}': {view-state for }
// ...
// // Explorer
// currentView: id of current view (defaults to first view if not specified)
diff --git a/test/backend.csv.test.js b/test/backend.csv.test.js
deleted file mode 100644
index e7b09b72..00000000
--- a/test/backend.csv.test.js
+++ /dev/null
@@ -1,112 +0,0 @@
-(function ($) {
-module("Backend Local CSV");
-
-test("parseCSV", function() {
- var csv = '"Jones, Jay",10\n' +
- '"Xyz ""ABC"" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
-
- var array = recline.Backend.CSV.parseCSV(csv);
- var exp = [
- ['Jones, Jay', 10],
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other, AN', '12:35' ]
- ];
- deepEqual(exp, array);
-
- var csv = '"Jones, Jay", 10\n' +
- '"Xyz ""ABC"" O\'Brien", 11:35\n' +
- '"Other, AN", 12:35\n';
- var array = recline.Backend.CSV.parseCSV(csv, {trim : true});
- deepEqual(exp, array);
-
- var csv = 'Name, Value\n' +
- '"Jones, Jay", 10\n' +
- '"Xyz ""ABC"" O\'Brien", 11:35\n' +
- '"Other, AN", 12:35\n';
- var dataset = new recline.Model.Dataset({
- data: csv,
- backend: 'csv'
- });
- dataset.fetch();
- equal(dataset.records.length, 3);
- var row = dataset.records.models[0].toJSON();
- deepEqual(row, {Name: 'Jones, Jay', Value: 10});
-});
-
-test("parseCSV - semicolon", function() {
- var csv = '"Jones; Jay";10\n' +
- '"Xyz ""ABC"" O\'Brien";11:35\n' +
- '"Other; AN";12:35\n';
-
- var array = recline.Backend.CSV.parseCSV(csv, {delimiter : ';'});
- var exp = [
- ['Jones; Jay', 10],
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other; AN', '12:35' ]
- ];
- deepEqual(exp, array);
-
-});
-
-test("parseCSV - quotechar", function() {
- var csv = "'Jones, Jay',10\n" +
- "'Xyz \"ABC\" O''Brien',11:35\n" +
- "'Other; AN',12:35\n";
-
- var array = recline.Backend.CSV.parseCSV(csv, {quotechar:"'"});
- var exp = [
- ["Jones, Jay", 10],
- ["Xyz \"ABC\" O'Brien", "11:35" ],
- ["Other; AN", "12:35" ]
- ];
- deepEqual(exp, array);
-
-});
-
-test("parseCSV skipInitialRows", function() {
- var csv = '"Jones, Jay",10\n' +
- '"Xyz ""ABC"" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
-
- var array = recline.Backend.CSV.parseCSV(csv, {skipInitialRows: 1});
- var exp = [
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other, AN', '12:35' ]
- ];
- deepEqual(exp, array);
-});
-
-test("serializeCSV - Array", function() {
- var csv = [
- ['Jones, Jay', 10],
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other, AN', '12:35' ]
- ];
-
- var array = recline.Backend.CSV.serializeCSV(csv);
- var exp = '"Jones, Jay",10\n' +
- '"Xyz \"ABC\" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
- deepEqual(array, exp);
-});
-
-test("serializeCSV - Object", function() {
- var indata = {
- fields: [ {id: 'name'}, {id: 'number'}],
- records: [
- {name: 'Jones, Jay', number: 10},
- {name: 'Xyz "ABC" O\'Brien', number: '11:35' },
- {name: 'Other, AN', number: '12:35' }
- ]
- };
-
- var array = recline.Backend.CSV.serializeCSV(indata);
- var exp = 'name,number\n' +
- '"Jones, Jay",10\n' +
- '"Xyz \"ABC\" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
- deepEqual(array, exp);
-});
-
-})(this.jQuery);