From 095f64e1f3cca2f036d82f614928415217198aaf Mon Sep 17 00:00:00 2001
From: Rufus Pollock

-Note: often you are loading data from a given source in
-order to load it into a Recline Dataset and display it in a View. However, you
-can also happily use a Backend to load data on its own without using any other
-part of the Recline library as all the Backends are designed to have no
-dependency on other parts of Recline.
-
+* gdocs: Google Docs (Spreadsheet)
+* csv: CSV files
+* solr: SOLR (partial)
+* elasticsearch: ElasticSearch
+* dataproxy: DataProxy (CSV and XLS on the Web)
+* ckan: CKAN – support for CKAN datastore
+* couchdb: CouchDB
+* memory: Memory (local data)
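For orientation, a minimal sketch of how one of these backends is selected by
name when creating a Dataset (the records here are invented; 'memory' is the
local-data backend listed above):

    // A minimal sketch using the 'memory' backend: data is supplied inline
    // rather than fetched. The records below are invented for illustration.
    var dataset = new recline.Model.Dataset({
      records: [
        {id: 0, country: 'DE', population: 80000000},
        {id: 1, country: 'UK', population: 60000000}
      ],
      backend: 'memory'
    });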
diff --git a/_includes/example-backends-online-csv.js b/_includes/example-backends-online-csv.js
index 284998c9..9cf128b3 100644
--- a/_includes/example-backends-online-csv.js
+++ b/_includes/example-backends-online-csv.js
@@ -1,7 +1,7 @@
// Create the dataset in the usual way
// Note the additional options you can specify for parsing the CSV file
var dataset = new recline.Model.Dataset({
- url: '{{page.root}}/demos/data/sample.csv',
+ url: '{{page.root}}demos/data/sample.csv',
backend: 'csv',
// delimiter: ',',
// quotechar: '"',
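Since the CSV backend's fetch returns a Deferred promise (see the removed
source below), usage after creation would look roughly like this sketch:

    // Sketch: fetch resolves once the CSV is downloaded and parsed;
    // records are then available on the dataset (cf. the tests below).
    dataset.fetch().done(function() {
      console.log(dataset.records.length);
      console.log(dataset.records.models[0].toJSON());
    });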
diff --git a/_includes/recline-deps.html b/_includes/recline-deps.html
index 7e26d98e..60b2005e 100644
--- a/_includes/recline-deps.html
+++ b/_includes/recline-deps.html
@@ -67,7 +67,7 @@
-
+
diff --git a/dist/recline.js b/dist/recline.js
index c439eb96..6f2aab1d 100644
--- a/dist/recline.js
+++ b/dist/recline.js
@@ -1,312 +1,5 @@
this.recline = this.recline || {};
this.recline.Backend = this.recline.Backend || {};
-this.recline.Backend.CSV = this.recline.Backend.CSV || {};
-
-// Note that provision of jQuery is optional (it is **only** needed if you use fetch on a remote file)
-(function(my) {
- "use strict";
- my.__type__ = 'csv';
-
- // use either jQuery or Underscore Deferred depending on what is available
- var Deferred = (typeof jQuery !== "undefined" && jQuery.Deferred) || _.Deferred;
-
- // ## fetch
- //
- // fetch supports 3 options depending on the attribute provided on the dataset argument
- //
- // 1. `dataset.file`: `file` is an HTML5 file object. This is opened and parsed with the CSV parser.
- // 2. `dataset.data`: `data` is a string in CSV format. This is passed directly to the CSV parser
- // 3. `dataset.url`: a url to an online CSV file that is ajax accessible (note this usually requires either local or on a server that is CORS enabled). The file is then loaded using jQuery.ajax and parsed using the CSV parser (NB: this requires jQuery)
- //
- // All options generates similar data and use the memory store outcome, that is they return something like:
- //
- //
- // {
- // records: [ [...], [...], ... ],
- // metadata: { may be some metadata e.g. file name }
- // useMemoryStore: true
- // }
- //
- my.fetch = function(dataset) {
- var dfd = new Deferred();
- if (dataset.file) {
- var reader = new FileReader();
- var encoding = dataset.encoding || 'UTF-8';
- reader.onload = function(e) {
- var out = my.extractFields(my.parseCSV(e.target.result, dataset), dataset);
- out.useMemoryStore = true;
- out.metadata = {
- filename: dataset.file.name
- }
- dfd.resolve(out);
- };
- reader.onerror = function (e) {
- alert('Failed to load file. Code: ' + e.target.error.code);
- };
- reader.readAsText(dataset.file, encoding);
- } else if (dataset.data) {
- var out = my.extractFields(my.parseCSV(dataset.data, dataset), dataset);
- out.useMemoryStore = true;
- dfd.resolve(out);
- } else if (dataset.url) {
- jQuery.get(dataset.url).done(function(data) {
- var out = my.extractFields(my.parseCSV(data, dataset), dataset);
- out.useMemoryStore = true;
- dfd.resolve(out);
- });
- }
- return dfd.promise();
- };
-
- // Convert array of rows in { records: [ ...] , fields: [ ... ] }
- // @param {Boolean} noHeaderRow If true assume that first row is not a header (i.e. list of fields but is data.
- my.extractFields = function(rows, noFields) {
- if (noFields.noHeaderRow !== true && rows.length > 0) {
- return {
- fields: rows[0],
- records: rows.slice(1)
- }
- } else {
- return {
- records: rows
- }
- }
- };
-
- // ## parseCSV
- //
- // Converts a Comma Separated Values string into an array of arrays.
- // Each line in the CSV becomes an array.
- //
- // Empty fields are converted to nulls and non-quoted numbers are converted to integers or floats.
- //
- // @return The CSV parsed as an array
- // @type Array
- //
- // @param {String} s The string to convert
- // @param {Object} options Options for loading CSV including
- // @param {Boolean} [trim=false] If set to True leading and trailing
- // whitespace is stripped off of each non-quoted field as it is imported
- // @param {String} [delimiter=','] A one-character string used to separate
- // fields. It defaults to ','
- // @param {String} [quotechar='"'] A one-character string used to quote
- // fields containing special characters, such as the delimiter or
- // quotechar, or which contain new-line characters. It defaults to '"'
- //
- // @param {Integer} skipInitialRows A integer number of rows to skip (default 0)
- //
- // Heavily based on uselesscode's JS CSV parser (MIT Licensed):
- // http://www.uselesscode.org/javascript/csv/
- my.parseCSV= function(s, options) {
- // Get rid of any trailing \n
- s = chomp(s);
-
- var options = options || {};
- var trm = (options.trim === false) ? false : true;
- var delimiter = options.delimiter || ',';
- var quotechar = options.quotechar || '"';
-
- var cur = '', // The character we are currently processing.
- inQuote = false,
- fieldQuoted = false,
- field = '', // Buffer for building up the current field
- row = [],
- out = [],
- i,
- processField;
-
- processField = function (field) {
- if (fieldQuoted !== true) {
- // If field is empty set to null
- if (field === '') {
- field = null;
- // If the field was not quoted and we are trimming fields, trim it
- } else if (trm === true) {
- field = trim(field);
- }
-
- // Convert unquoted numbers to their appropriate types
- if (rxIsInt.test(field)) {
- field = parseInt(field, 10);
- } else if (rxIsFloat.test(field)) {
- field = parseFloat(field, 10);
- }
- }
- return field;
- };
-
- for (i = 0; i < s.length; i += 1) {
- cur = s.charAt(i);
-
- // If we are at a EOF or EOR
- if (inQuote === false && (cur === delimiter || cur === "\n")) {
- field = processField(field);
- // Add the current field to the current row
- row.push(field);
- // If this is EOR append row to output and flush row
- if (cur === "\n") {
- out.push(row);
- row = [];
- }
- // Flush the field buffer
- field = '';
- fieldQuoted = false;
- } else {
- // If it's not a quotechar, add it to the field buffer
- if (cur !== quotechar) {
- field += cur;
- } else {
- if (!inQuote) {
- // We are not in a quote, start a quote
- inQuote = true;
- fieldQuoted = true;
- } else {
- // Next char is quotechar, this is an escaped quotechar
- if (s.charAt(i + 1) === quotechar) {
- field += quotechar;
- // Skip the next char
- i += 1;
- } else {
- // It's not escaping, so end quote
- inQuote = false;
- }
- }
- }
- }
- }
-
- // Add the last field
- field = processField(field);
- row.push(field);
- out.push(row);
-
- // Expose the ability to discard initial rows
- if (options.skipInitialRows) out = out.slice(options.skipInitialRows);
-
- return out;
- };
-
- // ## serializeCSV
- //
- // Convert an Object or a simple array of arrays into a Comma
- // Separated Values string.
- //
- // Nulls are converted to empty fields and integers or floats are converted to non-quoted numbers.
- //
- // @return The array serialized as a CSV
- // @type String
- //
- // @param {Object or Array} dataToSerialize The Object or array of arrays to convert. Object structure must be as follows:
- //
- // {
- // fields: [ {id: .., ...}, {id: ...,
- // records: [ { record }, { record }, ... ]
- // ... // more attributes we do not care about
- // }
- //
- // @param {object} options Options for serializing the CSV file including
- // delimiter and quotechar (see parseCSV options parameter above for
- // details on these).
- //
- // Heavily based on uselesscode's JS CSV serializer (MIT Licensed):
- // http://www.uselesscode.org/javascript/csv/
- my.serializeCSV= function(dataToSerialize, options) {
- var a = null;
- if (dataToSerialize instanceof Array) {
- a = dataToSerialize;
- } else {
- a = [];
- var fieldNames = _.pluck(dataToSerialize.fields, 'id');
- a.push(fieldNames);
- _.each(dataToSerialize.records, function(record, index) {
- var tmp = _.map(fieldNames, function(fn) {
- return record[fn];
- });
- a.push(tmp);
- });
- }
- var options = options || {};
- var delimiter = options.delimiter || ',';
- var quotechar = options.quotechar || '"';
-
- var cur = '', // The character we are currently processing.
- field = '', // Buffer for building up the current field
- row = '',
- out = '',
- i,
- j,
- processField;
-
- processField = function (field) {
- if (field === null) {
- // If field is null set to empty string
- field = '';
- } else if (typeof field === "string" && rxNeedsQuoting.test(field)) {
- // Convert string to delimited string
- field = quotechar + field + quotechar;
- } else if (typeof field === "number") {
- // Convert number to string
- field = field.toString(10);
- }
-
- return field;
- };
-
- for (i = 0; i < a.length; i += 1) {
- cur = a[i];
-
- for (j = 0; j < cur.length; j += 1) {
- field = processField(cur[j]);
- // If this is EOR append row to output and flush row
- if (j === (cur.length - 1)) {
- row += field;
- out += row + "\n";
- row = '';
- } else {
- // Add the current field to the current row
- row += field + delimiter;
- }
- // Flush the field buffer
- field = '';
- }
- }
-
- return out;
- };
-
- var rxIsInt = /^\d+$/,
- rxIsFloat = /^\d*\.\d+$|^\d+\.\d*$/,
- // If a string has leading or trailing space,
- // contains a comma double quote or a newline
- // it needs to be quoted in CSV output
- rxNeedsQuoting = /^\s|\s$|,|"|\n/,
- trim = (function () {
- // Fx 3.1 has a native trim function, it's about 10x faster, use it if it exists
- if (String.prototype.trim) {
- return function (s) {
- return s.trim();
- };
- } else {
- return function (s) {
- return s.replace(/^\s*/, '').replace(/\s*$/, '');
- };
- }
- }());
-
- function chomp(s) {
- if (s.charAt(s.length - 1) !== "\n") {
- // Does not end with \n, just return string
- return s;
- } else {
- // Remove the \n
- return s.substring(0, s.length - 1);
- }
- }
-
-
-}(this.recline.Backend.CSV));
-this.recline = this.recline || {};
-this.recline.Backend = this.recline.Backend || {};
this.recline.Backend.DataProxy = this.recline.Backend.DataProxy || {};
(function(my) {
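The removed parser and serializer are worth a quick illustration. A sketch,
assuming the standalone CSV backend script is loaded in place of this bundled
copy; the behaviour matches the tests kept at the end of this patch:

    // Sketch (assumes the standalone CSV backend script is now loaded):
    var rows = recline.Backend.CSV.parseCSV('"Jones, Jay",10\n');
    // rows => [ ['Jones, Jay', 10] ] -- unquoted numbers become integers
    var csv = recline.Backend.CSV.serializeCSV(rows);
    // csv  => '"Jones, Jay",10\n'   -- the comma forces re-quoting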
@@ -2813,7 +2506,7 @@ this.recline.View = this.recline.View || {};
// Manage multiple views together along with query editor etc. Usage:
//
//
-// var myExplorer = new model.recline.MultiView({
+// var myExplorer = new recline.View.MultiView({
// model: {{recline.Model.Dataset instance}}
// el: {{an existing dom element}}
// views: {{dataset views}}
@@ -2863,7 +2556,7 @@ this.recline.View = this.recline.View || {};
// {
// id: 'filterEditor', // used for routing
// label: 'Filters', // used for view switcher
-// view: new recline.View.FielterEditor({
+// view: new recline.View.FilterEditor({
// model: dataset
// })
// },
@@ -2881,10 +2574,10 @@ this.recline.View = this.recline.View || {};
// special as it includes config of many of the subviews.
//
//
-// state = {
+// var state = {
// query: {dataset query state - see dataset.queryState object}
-// view-{id1}: {view-state for this view}
-// view-{id2}: {view-state for }
+// 'view-{id1}': {view-state for this view}
+// 'view-{id2}': {view-state for }
// ...
// // Explorer
// currentView: id of current view (defaults to first view if not specified)
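Putting the corrected constructor name and state shape together, a usage
sketch (the element id and view id are illustrative placeholders):

    // Illustrative sketch: '#explorer' and 'grid' are placeholder ids.
    var myExplorer = new recline.View.MultiView({
      model: dataset,            // a recline.Model.Dataset instance
      el: $('#explorer'),        // an existing DOM element
      state: {
        currentView: 'grid'      // falls back to the first view if omitted
      }
    });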
diff --git a/docs/backends.markdown b/docs/backends.markdown
index 527f9014..09adb183 100644
--- a/docs/backends.markdown
+++ b/docs/backends.markdown
@@ -20,17 +20,25 @@ on the backend.
Backends come in 2 flavours:
-1. Loader backends - only implement fetch method. The data is then cached in a Memory.Store on the Dataset and interacted with there. This is best for sources which just allow you to load data or where you want to load the data once and work with it locally.
-2. Store backends - these support fetch, query and, if write-enabled, save. These are suitable where the backend contains a lot of data (infeasible to load locally - for examples a million rows) or where the backend has capabilities you want to take advantage of.
+* Loader backends - only implement the fetch method. The data is then cached
+ in a Memory.Store on the Dataset and interacted with there. This is best for
+ sources which just allow you to load data or where you want to load the data
+ once and work with it locally.
+* Store backends - these support fetch, query and, if write-enabled, save.
+ These are suitable where the backend contains a lot of data (infeasible to
+ load locally - for example, a million rows) or where the backend has
+ capabilities you want to take advantage of.
-# List of Backends Shipped with Recline
+Examples of the 2 types of backends are provided by the Google docs backend (a
+"Loader" backend) and the ElasticSearch backend (a Store backend).
-{% include backend-list.html %}
+# Available Backends
-NB: examples of the 2 types of backends are provided by the Google docs backend (a "Loader" backend) and the ElasticSearch backend (a Store backend).
-
-It's easy to write your own backend - you just need to implement the API as described below.
+You can find a list of the available Backends along with examples of how to use
+them in the [Backends Tutorial](tutorial-backends.html).
+Note that it's easy to write your own backend - you just need to implement the
+Recline Backend API described below.
# Backend API
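The shape of that API can be inferred from the CSV backend removed above. A
minimal "Loader" backend sketch (the name 'mybackend' and the data are
hypothetical; the fetch contract mirrors the removed source):

    this.recline = this.recline || {};
    this.recline.Backend = this.recline.Backend || {};
    this.recline.Backend.MyBackend = this.recline.Backend.MyBackend || {};
    (function(my) {
      "use strict";
      my.__type__ = 'mybackend';  // hypothetical backend name
      // use either jQuery or Underscore Deferred, as the CSV backend did
      var Deferred = (typeof jQuery !== "undefined" && jQuery.Deferred) || _.Deferred;
      // fetch is the only method a Loader backend must implement
      my.fetch = function(dataset) {
        var dfd = new Deferred();
        dfd.resolve({
          fields: ['id', 'value'],        // header row, as extractFields returns
          records: [[1, 'a'], [2, 'b']],  // invented sample rows
          useMemoryStore: true            // cache in a Memory.Store on the Dataset
        });
        return dfd.promise();
      };
    }(this.recline.Backend.MyBackend));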
diff --git a/docs/index.html b/docs/index.html
index 94b500d5..c30d8c3f 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -52,10 +52,6 @@ root: ../
Backends
- {% include backend-list.html %}
- Dataset Views and Widgets
diff --git a/docs/tutorial-backends.markdown b/docs/tutorial-backends.markdown
index 1b27fb0a..f64abd3d 100644
--- a/docs/tutorial-backends.markdown
+++ b/docs/tutorial-backends.markdown
@@ -14,26 +14,25 @@ sources such as Google Docs or the DataHub using Recline
-What Backends are available from Recline?
-
-{% include backend-list.html %}
-
-Backend you'd like to see not available? It's easy to write your own – see
-the Backend reference docs for details of the required API.
-
+## What Backends are available from Recline?
+
+{% include backend-list.html %}
+
+**Backend you'd like to see not available?** It's easy to write your own
+– see the Backend reference docs for details
+of the required API.
 
 ## Preparing your app
@@ -76,8 +77,9 @@ much more limited if you are just using a Backend. Specifically:
+
-
+
@@ -91,13 +93,6 @@ Doc](https://docs.google.com/spreadsheet/ccc?key=0Aon3JiuouxLUdGZPaUZsMjBxeGhfOW
 For Recline to be able to access a Google Spreadsheet it **must** have been
 'Published to the Web' (enabled via File -> Publish to the Web menu).
-
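A sketch of loading such a published spreadsheet with the gdocs backend (the
key below is a placeholder, not the one from the truncated URL above):

    // Sketch: 'YOUR-SHEET-KEY' stands in for a published sheet's key.
    var dataset = new recline.Model.Dataset({
      url: 'https://docs.google.com/spreadsheet/ccc?key=YOUR-SHEET-KEY',
      backend: 'gdocs'
    });
    dataset.fetch();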
diff --git a/test/backend.csv.test.js b/test/backend.csv.test.js
deleted file mode 100644
index e7b09b72..00000000
--- a/test/backend.csv.test.js
+++ /dev/null
@@ -1,112 +0,0 @@
-(function ($) {
-module("Backend Local CSV");
-
-test("parseCSV", function() {
- var csv = '"Jones, Jay",10\n' +
- '"Xyz ""ABC"" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
-
- var array = recline.Backend.CSV.parseCSV(csv);
- var exp = [
- ['Jones, Jay', 10],
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other, AN', '12:35' ]
- ];
- deepEqual(exp, array);
-
- var csv = '"Jones, Jay", 10\n' +
- '"Xyz ""ABC"" O\'Brien", 11:35\n' +
- '"Other, AN", 12:35\n';
- var array = recline.Backend.CSV.parseCSV(csv, {trim : true});
- deepEqual(exp, array);
-
- var csv = 'Name, Value\n' +
- '"Jones, Jay", 10\n' +
- '"Xyz ""ABC"" O\'Brien", 11:35\n' +
- '"Other, AN", 12:35\n';
- var dataset = new recline.Model.Dataset({
- data: csv,
- backend: 'csv'
- });
- dataset.fetch();
- equal(dataset.records.length, 3);
- var row = dataset.records.models[0].toJSON();
- deepEqual(row, {Name: 'Jones, Jay', Value: 10});
-});
-
-test("parseCSV - semicolon", function() {
- var csv = '"Jones; Jay";10\n' +
- '"Xyz ""ABC"" O\'Brien";11:35\n' +
- '"Other; AN";12:35\n';
-
- var array = recline.Backend.CSV.parseCSV(csv, {delimiter : ';'});
- var exp = [
- ['Jones; Jay', 10],
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other; AN', '12:35' ]
- ];
- deepEqual(exp, array);
-
-});
-
-test("parseCSV - quotechar", function() {
- var csv = "'Jones, Jay',10\n" +
- "'Xyz \"ABC\" O''Brien',11:35\n" +
- "'Other; AN',12:35\n";
-
- var array = recline.Backend.CSV.parseCSV(csv, {quotechar:"'"});
- var exp = [
- ["Jones, Jay", 10],
- ["Xyz \"ABC\" O'Brien", "11:35" ],
- ["Other; AN", "12:35" ]
- ];
- deepEqual(exp, array);
-
-});
-
-test("parseCSV skipInitialRows", function() {
- var csv = '"Jones, Jay",10\n' +
- '"Xyz ""ABC"" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
-
- var array = recline.Backend.CSV.parseCSV(csv, {skipInitialRows: 1});
- var exp = [
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other, AN', '12:35' ]
- ];
- deepEqual(exp, array);
-});
-
-test("serializeCSV - Array", function() {
- var csv = [
- ['Jones, Jay', 10],
- ['Xyz "ABC" O\'Brien', '11:35' ],
- ['Other, AN', '12:35' ]
- ];
-
- var array = recline.Backend.CSV.serializeCSV(csv);
- var exp = '"Jones, Jay",10\n' +
- '"Xyz \"ABC\" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
- deepEqual(array, exp);
-});
-
-test("serializeCSV - Object", function() {
- var indata = {
- fields: [ {id: 'name'}, {id: 'number'}],
- records: [
- {name: 'Jones, Jay', number: 10},
- {name: 'Xyz "ABC" O\'Brien', number: '11:35' },
- {name: 'Other, AN', number: '12:35' }
- ]
- };
-
- var array = recline.Backend.CSV.serializeCSV(indata);
- var exp = 'name,number\n' +
- '"Jones, Jay",10\n' +
- '"Xyz \"ABC\" O\'Brien",11:35\n' +
- '"Other, AN",12:35\n';
- deepEqual(array, exp);
-});
-
-})(this.jQuery);