// model/CatModal.js
const mongoose = require("mongoose");
const Schema = mongoose.Schema;

// Create Schema
const CategorySchema = new Schema({
id: {
type: Schema.Types.ObjectId,
},
name: {
type: String,
required: true,
},
});

module.exports = mongoose.model("category", CategorySchema);

// model/ProductModal.js
const mongoose = require("mongoose");
const Schema = mongoose.Schema;

// Create Schema
const ProductSchema = new Schema({
id: {
type: Schema.Types.ObjectId,
},
title: {
type: String,
required: true,
},
routeName: {
type: String,
required: true,
maxlength: 40,
},
items: [
{
product_id: {
type: Schema.Types.ObjectId,
},
name: {
type: String,
required: true,
},
imageUrl: {
type: String,
required: true,
},
price: {
type: String,
},
},
],
});

module.exports = mongoose.model("product", ProductSchema);

// model/User.js
const mongoose = require("mongoose");
const Schema = mongoose.Schema;

// Create Schema
const UserSchema = new Schema({
name: {
type: String,
required: true,
},
email: {
type: String,
required: true,
},
password: {
type: String,
required: true,
},
date: {
type: Date,
default: Date.now,
},
});

module.exports = mongoose.model("users", UserSchema);

// routes/auth.js
const express = require("express");
const router = express.Router();
const User = require("../model/User");
const bcrypt = require("bcryptjs");
const jwt = require("jsonwebtoken");
const { registerValidation, loginValidation } = require("../validation");
router.post("/register", async (req, res) => {
const { error } = registerValidation(req.body);
// console.log(req.body);
if (error) {
return res.status(400).send(error.details[0].message);
}

const isUserExist = await User.findOne({ email: req.body.email });
if (isUserExist) {
return res.status(400).send("user exist");
}

const salt = await bcrypt.genSalt(10);
const hashPassword = await bcrypt.hash(req.body.password, salt);
const newUser = {
name: req.body.name,
email: req.body.email,
password: hashPassword,
};

const user = new User(newUser);

try {
await user.save();
res.status(200).send({ user: user._id });
} catch (error) {
res.status(400).send(error);
}
});

router.post("/login", async (req, res) => {
const { error } = loginValidation(req.body);
if (error) {
return res.status(400).send(error.details[0].message);
}
const isUserExist = await User.findOne({ email: req.body.email });

if (!isUserExist) {
return res.status(400).send({ msg: "user does not exist" });
}

// check that the password is correct
const validPass = await bcrypt.compare(
req.body.password,
isUserExist.password
);
if (!validPass) {
return res.status(400).send("email or password wrong");
} // create token
const token = jwt.sign({ _id: isUserExist._id }, process.env.SECRET_KEY);
res.header("auth-token", token).send({ token, user: isUserExist });
});
module.exports = router;

// routes/product.js
const express = require("express");
const router = express.Router();
const mongoose = require("mongoose");
const Product = require("../model/ProductModal");
const Category = require("../model/CatModal");

// Get all products
router.get("/product", (req, res) => {
Product.find()
.then((products) => res.json(products))
.catch((err) =>
res.status(404).json({
nopostsfound: "No product found",
})
);
});

// Get all category names
router.get("/category", (req, res) => {
Category.find()
.then((category) => res.json(category))
.catch((err) =>
res.status(404).json({
nocatsfound: "No Category found",
})
);
});

// Get all products for a specific category name
router.get("/product/:cat_name", (req, res) => {
Product.find({ routeName: req.params["cat_name"] })
.then((product) => res.json(product))
.catch((err) =>
res.status(404).json({
nocatsfound: "No product found",
})
);
});

module.exports = router;

// routes/api/users.js (a second Express project)
const express = require("express");
const router = express.Router();
const gravatar = require("gravatar");
const bcrypt = require("bcryptjs");
const jwt = require("jsonwebtoken");
const keys = require("../../config/keys");
const passport = require("passport");

// Load Input Validation
const validateRegisterInput = require("../../validation/register");
const validateLoginInput = require("../../validation/login");

// Load User model
const User = require("../../models/User");

// @route   GET api/users/test
// @desc Tests users route
// @access Public
router.get("/test", (req, res) => res.json({ msg: "Users Works" }));// @route GET api/users/register
// @desc Register user
// @access Public
router.post("/register", (req, res) => {
const { errors, isValid } = validateRegisterInput(req.body);

// Check Validation
if (!isValid) {
return res.status(400).json(errors);
}

User.findOne({ email: req.body.email }).then((user) => {
if (user) {
errors.email = "Email already exists";
return res.status(400).json(errors);
} else {
const avatar = gravatar.url(req.body.email, {
s: "200", // Size
r: "pg", // Rating
d: "mm", // Default
});

const newUser = new User({
name: req.body.name,
email: req.body.email,
avatar,
password: req.body.password,
});

bcrypt.genSalt(10, (err, salt) => {
bcrypt.hash(newUser.password, salt, (err, hash) => {
if (err) throw err;
newUser.password = hash;
newUser
.save()
.then((user) => res.json(user))
.catch((err) => console.log(err));
});
});
}
});
});

// @route   POST api/users/login
// @desc Login User / Returning JWT Token
// @access Public
router.post("/login", (req, res) => {
const { errors, isValid } = validateLoginInput(req.body);

// Check Validation
if (!isValid) {
return res.status(400).json(errors);
}

const email = req.body.email;
const password = req.body.password;

// Find user by email
User.findOne({ email }).then((user) => {
// Check for user
if (!user) {
errors.email = "User not found";
return res.status(404).json(errors);
}

// Check Password
bcrypt.compare(password, user.password).then((isMatch) => {
if (isMatch) {
// User Matched
const payload = { id: user.id, name: user.name, avatar: user.avatar }; // Create JWT Payload

// Sign Token
jwt.sign(
payload,
keys.secretOrKey,
{ expiresIn: 3600 },
(err, token) => {
res.json({
success: true,
token: "Bearer " + token,
});
}
);
} else {
errors.password = "Password incorrect";
return res.status(400).json(errors);
}
});
});
});

// @route   GET api/users/current
// @desc Return current user
// @access Private
router.get(
"/current",
passport.authenticate("jwt", { session: false }),
(req, res) => {
res.json({
id: req.user.id,
name: req.user.name,
email: req.user.email,
});
}
);

module.exports = router;

// config/keys.js
if (process.env.NODE_ENV === "production") {
module.exports = require("./keys_prod");
} else {
module.exports = require("./keys_dev");
}

// config/keys_dev.js
module.exports = {
mongoURI:
"mongodb+srv://ajay:ajay@cluster0-fhjlv.mongodb.net/ecommerce-shop?retryWrites=true&w=majority",
secretOrKey: "secret_key",
};

// config/keys_prod.js
module.exports = {
mongoURI: process.env.MONGO_URI,
secretOrKey: process.env.SECRET_OR_KEY,
};

// Server entry point
const express = require("express");
const PORT = process.env.PORT || 5000;
const app = express();
require("./db");
const productRoute = require("./routes/product");
const authRoute = require("./routes/auth");
const path = require("path");

/************ MIDDLEWARES *************/

// Body parser middleware
app.use(express.json());
app.use(express.urlencoded({ extended: true }));

// Add headers so that all operations can be performed on the API
// (needed for CORS; Google it if you are not familiar)
app.use((req, res, next) => {
res.header("Access-Control-Allow-Origin", "*");
res.header("Access-Control-Allow-Header", "*"); next();
});
/************ ROUTES *************/
app.use("/api/v1/", productRoute);
app.use("/api/v1/user", authRoute);if (process.env.NODE_ENV === "production") {
app.use(express.static("../frontend/build")); app.get("*", (req, res) => {
res.sendFile(path.join(__dirname, "../frontend", "build", "index.html"));
});
}
// listen to the port
app.listen(PORT);

// db.js
const mongoose = require("mongoose");
require("dotenv/config");

mongoose.connect(
process.env.DB_CONNECTION,
{ useNewUrlParser: true, useUnifiedTopology: true },
() => console.log("conected to db")
);

// validation.js
const Joi = require("@hapi/joi");
const registerValidation = (data) => {
const schema = Joi.object({
name: Joi.string().min(6).required(),
email: Joi.string().min(6).required().email(),
password: Joi.string().min(6).required(),
});
return schema.validate(data);
};

const loginValidation = (data) => {
const schema = Joi.object({
email: Joi.string().min(6).required().email(),
password: Joi.string().min(6).required(),
});
return schema.validate(data);
};

module.exports.registerValidation = registerValidation;
module.exports.loginValidation = loginValidation;

// frontend/src/index.js
import React from "react";
import ReactDOM from "react-dom";
import "./index.css";
import App from "./App";
import * as serviceWorker from "./serviceWorker";
import { BrowserRouter } from "react-router-dom";
import { Provider } from "react-redux";
import store from "./redux/store";
ReactDOM.render(
<React.StrictMode>
<Provider store={store}>
<BrowserRouter>
<App />
</BrowserRouter>
</Provider>
</React.StrictMode>,
document.getElementById("root")
);

// If you want your app to work offline and load faster, you can change
// unregister() to register() below. Note this comes with some pitfalls.
// Learn more about service workers: https://bit.ly/CRA-PWA
serviceWorker.unregister();

// frontend/src/App.test.js
import React from 'react';
import { render } from '@testing-library/react';
import App from './App';

test('renders learn react link', () => {
const { getByText } = render(<App />);
const linkElement = getByText(/learn react/i);
expect(linkElement).toBeInTheDocument();
});

-- Pipelined Table Function: stockpivot
/*
The stockpivot function takes one row from the stock table and returns two
rows to be stored in the ticker table.
*/
CREATE TABLE stocktable (
   ticker        VARCHAR2(20),
   trade_date    DATE,
   open_price    NUMBER,
   close_price   NUMBER
);

BEGIN
   FOR indx IN 1 .. 100000
   LOOP
      -- Might as well be optimistic!
      INSERT INTO stocktable
           VALUES ('STK' || indx, SYSDATE, indx, indx + 15);
   END LOOP;

   COMMIT;
END;
/

CREATE TYPE tickertype AS OBJECT (
   ticker      VARCHAR2 (20),
   pricedate   DATE,
   pricetype   VARCHAR2 (1),
   price       NUMBER
);
/

CREATE TYPE tickertypeset AS TABLE OF tickertype;
/

CREATE TABLE tickertable (
   ticker      VARCHAR2(20),
   pricedate   DATE,
   pricetype   VARCHAR2(1),
   price       NUMBER
)
/

CREATE OR REPLACE PACKAGE pipeline
IS
   TYPE ticker_tt IS TABLE OF tickertype;

   FUNCTION stockpivot_pl (dataset refcur_pkg.refcur_t)
      RETURN tickertypeset
      PIPELINED;
END pipeline;
/

/*
In addition to using the PIPELINED keyword in the header, you use the PIPE ROW
statement to send the value back to the calling query, asynchronous to the
function actually finishing and returning control. Notice also that the RETURN
statement returns nothing but control, since all the data has already been
passed back.
*/
CREATE OR REPLACE PACKAGE BODY pipeline
IS
   FUNCTION stockpivot_pl (dataset refcur_pkg.refcur_t)
      RETURN tickertypeset
      PIPELINED
   IS
      out_obj   tickertype := tickertype (NULL, NULL, NULL, NULL);
      in_rec    dataset%ROWTYPE;
   BEGIN
      LOOP
         FETCH dataset INTO in_rec;
         EXIT WHEN dataset%NOTFOUND;

         -- first row
         out_obj.ticker := in_rec.ticker;
         out_obj.pricetype := 'O';
         out_obj.price := in_rec.open_price;
         out_obj.pricedate := in_rec.trade_date;
         PIPE ROW (out_obj);

         -- second row
         out_obj.pricetype := 'C';
         out_obj.price := in_rec.close_price;
         out_obj.pricedate := in_rec.trade_date;
         PIPE ROW (out_obj);
      END LOOP;

      CLOSE dataset;
      RETURN;
   END;
END pipeline;
/

/*
Notice that a query is passed as a parameter to the function. This is not a
*string* (dynamic SQL). It is the query itself, which is then encased within a
CURSOR function call, which returns a cursor variable that is actually passed
to the body of the function for fetching.
*/
INSERT INTO tickertable
   SELECT *
     FROM TABLE (pipeline.stockpivot_pl (CURSOR (SELECT * FROM stocktable)))
    WHERE ROWNUM < 10;
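
-- Note: the pipeline package above references refcur_pkg.refcur_t, which this
-- script never defines. For the package to compile, a definition along these
-- lines (an assumption, not part of the original script) must already exist:
--
--   CREATE OR REPLACE PACKAGE refcur_pkg
--   IS
--      TYPE refcur_t IS REF CURSOR RETURN stocktable%ROWTYPE;
--   END refcur_pkg;
--   /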
/*
What is a streaming table function? Well, first, a table function is a
function executed with the TABLE operator, and then within the FROM
clause of a query - in other words, a function that is selected from
just like a relational table! A common usage of table functions in the
Data Warehousing world is to stream data directly from one process or
transformation, to the next process without intermediate staging.
Hence, a table function used in this way is called a streaming table function.
For more details, check out my blog post: http://stevenfeuersteinonplsql.blogspot.com/2015/06/table-functions-part-4-streaming-table.html
*/
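
-- A minimal sketch of the calling shape just described (the names here are
-- placeholders, not objects created by this script):
--
--   INSERT INTO target_table
--      SELECT *
--        FROM TABLE (my_pkg.my_streaming_fn (CURSOR (SELECT * FROM source_table)));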

-- Daily Record of Stock Activity
-- This table records the day on which trading took place, the ticker symbol,
-- and the open and close prices.
CREATE TABLE daily_record
(
ticker VARCHAR2 (20),
trade_date DATE,
open_price NUMBER,
close_price NUMBER
);

-- Extremely Valuable Stocks
-- After all, humans cannot survive without chickpeas, broccoli and, most necessary of all, CHOCOLATE!
BEGIN
-- Populate the table.
INSERT INTO daily_record
VALUES ('CHICKPEAS',
trunc (SYSDATE),
10,
12);
INSERT INTO daily_record
VALUES ('BROCCOLI',
trunc (SYSDATE),
75,
87);
INSERT INTO daily_record
VALUES ('CHOCOLATE',
trunc (SYSDATE),
500,
2000);
COMMIT;
END;
/

-- Table of Open and Close Prices
/*
The daily record's OK, but we really want to separate out those open
and close prices into separate rows - essentially do a very simple pivot of data.
Now, as I am sure many of you know, you do NOT need PL/SQL and the added
complexity of a table function to achieve this pivot. INSERT ALL will do
just fine, for example. So assume for the purposes of this demonstration
that the pivot logic is complex and needs procedural logic to get the job done
(a minimal INSERT ALL sketch follows the table creation below).
*/
CREATE TABLE open_and_close
(
ticker VARCHAR2 (20),
pricedate DATE,
pricetype VARCHAR2 (1),
price NUMBER
);
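
-- As the comment above notes, this simple pivot needs no PL/SQL. A minimal
-- INSERT ALL sketch of the same transformation, using only the daily_record
-- and open_and_close tables created in this script:
INSERT ALL
   INTO open_and_close (ticker, pricedate, pricetype, price)
        VALUES (ticker, trade_date, 'O', open_price)
   INTO open_and_close (ticker, pricedate, pricetype, price)
        VALUES (ticker, trade_date, 'C', close_price)
   SELECT ticker, trade_date, open_price, close_price
     FROM daily_record;

ROLLBACK; -- undo the demo insert so the table function walkthrough below starts clean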

-- An Object Type Mimicking the Table
/*
Table functions returns collections, in our case a nested table. Each element
in the nested table will need to match the structure of the open_and_close table.
I cannot, however, create a schema-level nested table like this
"CREATE TYPE nt AS TABLE OF table%ROWTYPE". %ROWTYPE is syntax known only to
PL/SQL, not SQL. So I will instead create an object type that mimics the relational table.
*/
CREATE TYPE open_and_close_ot AS OBJECT
(
ticker VARCHAR2 (20),
pricedate DATE,
pricetype VARCHAR2 (1),
price NUMBER
);
/

-- Create Nested Table On Object Type
-- And now I create the nested table that will be returned by the table function
-- as a collection of those object types.
CREATE TYPE open_and_close_nt AS TABLE OF open_and_close_ot;
/

-- Package with Table Function
/*
My package specification holds two elements: the definition of the REF CURSOR
that will be used to pass in the cursor variable to the function, and the header
of the function. It takes in a dataset and a limit value (used by BULK COLLECT -
better than hard-coding it!), and returns a nested table, with each row of
daily_record broken out into two elements in the nested table.
*/
CREATE OR REPLACE PACKAGE stock_mgr
IS
TYPE dailies_cur IS REF CURSOR
RETURN daily_record%ROWTYPE;
FUNCTION separate_dailies (dataset_in dailies_cur,
limit_in IN INTEGER DEFAULT 100)
RETURN open_and_close_nt;
END;
/

-- The Table Function Definition
/*
Where all the magic happens. Of course, most of the magic will be the code
you write to implement your specific transformation. In this case it is rather
trivial. But the basic steps are: inside a loop, fetch the next N rows from the
dataset using BULK COLLECT; execute the transformation logic; put the transformed
data into the nested table; close the cursor when done; return the nested table.
*/
CREATE OR REPLACE PACKAGE BODY stock_mgr
IS
FUNCTION separate_dailies (dataset_in dailies_cur,
limit_in IN INTEGER DEFAULT 100)
RETURN open_and_close_nt
IS
TYPE dataset_tt IS TABLE OF daily_record%ROWTYPE
INDEX BY PLS_INTEGER;
l_dataset dataset_tt;
l_separated open_and_close_nt := open_and_close_nt ();
BEGIN
LOOP
FETCH dataset_in BULK COLLECT INTO l_dataset LIMIT limit_in;
EXIT WHEN l_dataset.COUNT = 0;
l_separated.EXTEND (l_dataset.COUNT * 2);
FOR indx IN 1 .. l_dataset.COUNT
LOOP
l_separated ( (indx - 1) * 2 + 1) :=
open_and_close_ot (l_dataset (indx).ticker,
l_dataset (indx).trade_date,
'O',
l_dataset (indx).open_price);
l_separated ( (indx - 1) * 2 + 2) :=
open_and_close_ot (l_dataset (indx).ticker,
l_dataset (indx).trade_date,
'C',
l_dataset (indx).close_price);
END LOOP;
END LOOP;
CLOSE dataset_in;
RETURN l_separated;
END;
END;
/

-- Execute Function in FROM Clause
/*
If this is the first time you are seeing a table function, pretty cool,
right? You can call the function right inside the FROM clause of a SELECT -
as long as you put it inside the TABLE operator. Furthermore, I use the CURSOR
expression to convert a dataset - another SELECT, and not a dynamic one;
it's not inside single quotes - into a cursor variable, which is then passed
to the function! Notice that I include an ORDER BY. You would expect the
data to be displayed exactly as it was put into the table, but you should not
ASSUME nor RELY on that. If you want to be sure of the order of data displayed,
you must always add an ORDER BY clause.
*/
SELECT *
FROM TABLE (stock_mgr.separate_dailies (
CURSOR (SELECT * FROM daily_record)))
ORDER BY ticker, pricedate, pricetype;
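
-- The stated goal was to populate open_and_close. The same table function call
-- can drive an INSERT as well (a sketch mirroring the SELECT above; it is not
-- part of the original script):
INSERT INTO open_and_close
   SELECT *
     FROM TABLE (stock_mgr.separate_dailies (
                    CURSOR (SELECT * FROM daily_record)));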
/*
This is a PL/SQL implementation that tokenizes a string, based on a delimiter
character, into a set of "tokens". Two tokenizers are provided: one for
VARCHAR2 and one for CLOB. Both can be used procedurally in a PL/SQL loop or
in SQL as a table function. Created by Carsten Czarski.
*/

-- Create type to describe a table row
-- This type describes a row of the String Tokenizers' result table.
create type token_t as object(
token_text varchar2(4000),
start_pos number,
length number
)
/

-- Create a table type to describe the result set
create type token_list as table of token_t
/

-- CLOB Tokenizer: Type definition
/*
The String tokenizer is being implemented as two object types. One is for
tokenizing CLOBs, the other one for tokenizing VARCHAR2s. The functionality is
contained in its static and member functions. The types can be used procedurally
or as a SQL table function.
*/
create type clob_tokenizer as object(
value_string clob,
delimiter varchar2(10),
parser_current_pos number,
last_token varchar2(4000),
constructor function clob_tokenizer (p_string in clob, p_delim in varchar2)
return self as result,
member function has_more_tokens return number,
member function next_token(self in out nocopy clob_tokenizer) return varchar2,
static function all_tokens (p_string in clob, p_delim in varchar2)
return token_list pipelined parallel_enable,
static function all_tokens_cur (p_cursor in sys_refcursor, p_delim in varchar2)
return token_list pipelined parallel_enable (partition p_cursor by any)
);
/

-- CLOB tokenizer type implementation
-- The type body contains the actual implementations for the static and member procedures.
create or replace type body clob_tokenizer is
constructor function clob_tokenizer (p_string in clob, p_delim in varchar2) return self as result as
begin
self.value_string := p_string;
self.delimiter := p_delim;
self.parser_current_pos := 1;
self.last_token := null;
return ;
end;
member function has_more_tokens return number as
begin
if self.parser_current_pos <= dbms_lob.getlength(value_string) then
return 1;
else
return 0;
end if;
end;
member function next_token(self in out nocopy clob_tokenizer) return varchar2 is
l_next_delim_pos number;
l_token varchar2(4000);
begin
if self.has_more_tokens() = 1 then
l_next_delim_pos := dbms_lob.instr(self.value_string, self.delimiter, self.parser_current_pos);
if l_next_delim_pos = 0 then
l_token := dbms_lob.substr(
lob_loc => self.value_string,
amount => (dbms_lob.getlength(self.value_string) - self.parser_current_pos) + 1,
offset => self.parser_current_pos
);
parser_current_pos := dbms_lob.getlength(self.value_string) + 1;
else
l_token := dbms_lob.substr(
lob_loc => self.value_string,
amount => l_next_delim_pos - self.parser_current_pos,
offset => self.parser_current_pos
);
parser_current_pos := l_next_delim_pos + length(self.delimiter);
end if;
else
l_token := null;
end if;
self.last_token := l_token;
return l_token;
end;
static function all_tokens (p_string in clob, p_delim in varchar2)
return token_list pipelined parallel_enable is
l_st clob_tokenizer := clob_tokenizer(p_string, p_delim);
l_startpos number;
l_token varchar2(4000);
begin
while l_st.has_more_tokens = 1 loop
l_startpos := l_st.parser_current_pos;
l_token := l_st.next_token();
pipe row (token_t(l_token, l_startpos, nvl(length(l_token),0)));
end loop;
return;
end;
static function all_tokens_cur (p_cursor in sys_refcursor, p_delim in varchar2)
return token_list pipelined parallel_enable (partition p_cursor by any) is
l_st clob_tokenizer;
l_string clob;
l_startpos number;
l_token varchar2(4000);
begin
loop
fetch p_cursor into l_string;
exit when p_cursor%notfound;
l_st := clob_tokenizer(l_string, p_delim);
while l_st.has_more_tokens = 1 loop
l_startpos := l_st.parser_current_pos;
l_token := l_st.next_token();
pipe row (token_t(l_token, l_startpos, nvl(length(l_token),0)));
end loop;
end loop;
return;
end;
end;
/
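
-- The usage examples at the end of this script exercise STRING_TOKENIZER only.
-- A minimal sketch of the CLOB variant used the same way (the input literal is
-- illustrative, not part of the original script):
select rownum, token_text, start_pos, length
  from table(clob_tokenizer.all_tokens(to_clob('alpha,beta,gamma'), ','));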

-- VARCHAR2 Tokenizer: Type definition
/*
The String tokenizer is being implemented as two object types. One is for
tokenizing CLOBs, the other one for tokenizing VARCHAR2s. The functionality is
contained in its static and member functions. The types can be used procedurally
or as a SQL table function.
*/
create type string_tokenizer as object(
value_string varchar2(4000),
delimiter varchar2(10),
parser_current_pos number,
last_token varchar2(4000),
constructor function string_tokenizer (p_string in varchar2, p_delim in varchar2)
return self as result,
member function has_more_tokens(self in out nocopy string_tokenizer) return number,
member function next_token(self in out nocopy string_tokenizer) return varchar2,
static function all_tokens (p_string in varchar2, p_delim in varchar2)
return token_list pipelined parallel_enable,
static function all_tokens_cur (p_cursor in sys_refcursor, p_delim in varchar2)
return token_list pipelined parallel_enable (partition p_cursor by any)
);
/

-- VARCHAR2 tokenizer type implementation
-- The type body contains the actual implementations for the static and member procedures.
create or replace type body string_tokenizer is
constructor function string_tokenizer (p_string in varchar2, p_delim in varchar2)
return self as result as
begin
self.value_string := p_string;
self.delimiter := p_delim;
self.parser_current_pos := 1;
self.last_token := null;
return ;
end;
member function has_more_tokens(self in out nocopy string_tokenizer) return number as
begin
if self.parser_current_pos <= length(value_string) then
return 1;
else
return 0;
end if;
end;
member function next_token(self in out nocopy string_tokenizer) return varchar2 as
l_next_delim_pos number;
l_next_enclose_pos number;
l_token varchar2(4000);
begin
if self.has_more_tokens() = 1 then
l_next_delim_pos := instr(self.value_string, self.delimiter, self.parser_current_pos);
if l_next_delim_pos = 0 then
l_token := substr(value_string, self.parser_current_pos);
parser_current_pos := length(self.value_string) + 1;
else
l_token := substr(self.value_string, self.parser_current_pos,
l_next_delim_pos - self.parser_current_pos);
parser_current_pos := l_next_delim_pos + length(self.delimiter);
end if;
else
l_token := null;
end if;
self.last_token := l_token;
return l_token;
end;
static function all_tokens (p_string in varchar2, p_delim in varchar2)
return token_list pipelined parallel_enable is
l_st string_tokenizer := string_tokenizer(p_string, p_delim);
l_startpos number;
l_token varchar2(4000);
begin
while l_st.has_more_tokens = 1 loop
l_startpos := l_st.parser_current_pos;
l_token := l_st.next_token();
pipe row (token_t(l_token, l_startpos, nvl(length(l_token),0)));
end loop;
return;
end;
static function all_tokens_cur (p_cursor in sys_refcursor, p_delim in varchar2)
return token_list pipelined parallel_enable (partition p_cursor by any) is
l_st string_tokenizer;
l_string varchar2(4000);
l_startpos number;
l_token varchar2(4000);
begin
loop
fetch p_cursor into l_string;
exit when p_cursor%notfound;
l_st := string_tokenizer(l_string, p_delim);
while l_st.has_more_tokens = 1 loop
l_startpos := l_st.parser_current_pos;
l_token := l_st.next_token();
pipe row (token_t(l_token, l_startpos, nvl(length(l_token),0)));
end loop;
end loop;
return;
end;
end;
/

-- Using STRING_TOKENIZER as a table function
-- This is the table function example for STRING_TOKENIZER. It tokenizes a string with " " as the delimiter.
select rownum, token_text, start_pos, length
from table(string_tokenizer.all_tokens('The quick brown fox jumps over the lazy dog.', ' '));

-- Using STRING_TOKENIZER as a table function
-- This is the table function example for STRING_TOKENIZER. It tokenizes a string with "#" as the delimiter.
select rownum, token_text, start_pos, length
from table(string_tokenizer.all_tokens('##a#b#c#d#e#f##', '#'));

-- Procedural use of STRING_TOKENIZER
-- This example uses the STRING_TOKENIZER within a PL/SQL WHILE loop. The loop
-- runs until HAS_MORE_TOKENS() returns zero; NEXT_TOKEN() returns the next token in the string.
create or replace procedure st_tester (p_string in varchar2, p_delim in varchar2) is
v_st string_tokenizer := string_tokenizer(p_string, p_delim);
v_cnt pls_integer := 0;
begin
while v_st.has_more_tokens() = 1 loop
dbms_output.put_line(v_cnt||': '||v_st.next_token());
v_cnt := v_cnt + 1;
end loop;
end;
/

-- Procedural use of STRING_TOKENIZER
-- This example uses the STRING_TOKENIZER within a PL/SQL WHILE loop. The loop
-- runs until HAS_MORE_TOKENS() returns zero; NEXT_TOKEN() returns the next token in the string.
begin
st_tester('The quick brown fox jumps over the lazy dog.', ' ');
end;
/
/*
A table function is a function executed with the TABLE operator,
and then within the FROM clause of a query - in other words, a function
that is selected from just like a relational table! A common usage of
table functions in the Data Warehousing world is to stream data directly
from one process or transformation, to the next process without
intermediate staging (a "streaming" table function). But you can also
leverage table functions to programmatically create a dataset and then
make it available in SQL.
*/

-- Create Nested Table Type
/*
A table function must return a collection that is visible in the SQL layer.
So the collection's type must be defined at the schema-level as shown here or
in 12.1 and higher in a package specification.
*/
CREATE OR REPLACE TYPE names_nt IS TABLE OF VARCHAR2 (1000);
/
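
-- As noted above, in 12.1 and higher the nested table type could instead live
-- in a package specification (a sketch; the package name is illustrative):
--
--   CREATE OR REPLACE PACKAGE names_pkg
--   IS
--      TYPE names_nt IS TABLE OF VARCHAR2 (1000);
--   END names_pkg;
--   /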

-- Silly Dataset Generator
/*
I need to generate N names. Here's a function that does it.
It's a silly example. I can do this in SQL, too, but it demonstrates the ability
to programmatically (procedurally) populate a collection.
*/
CREATE OR REPLACE FUNCTION lotsa_names (
base_name_in IN VARCHAR2
, count_in IN INTEGER
)
RETURN names_nt
IS
retval names_nt := names_nt ( );
BEGIN
retval.EXTEND ( count_in );
FOR indx IN 1 .. count_in
LOOP
retval ( indx ) := base_name_in || ' ' || indx;
END LOOP;
RETURN retval;
END lotsa_names;
/

-- Call table function inside SELECT
/*
And there you have it, folks! Embed the function invocation inside the
TABLE operator, in the FROM clause, and Oracle Database works its magic for you. And when you have a collection of scalars, the column name is hard-coded to
COLUMN_VALUE but you can use a column alias to change it to whatever you'd like.
*/
SELECT COLUMN_VALUE my_name
FROM TABLE (lotsa_names ('Steven', 100)) names;

-- A "Table" Just Like Any Other
/*
Once TABLE has transformed your collection into a relational dataset,
you can join it to other tables, perform unions, etc. Whatever you would
and could do with a "normal" table or view.
*/
SELECT COLUMN_VALUE my_alias
FROM hr.employees, TABLE (lotsa_names ('Steven', 10)) names;

-- Return Cursor Variable to Dataset
/*
Here's an example of calling the table function, converting to a SQL dataset,
assigning it to a cursor variable, and then returning that via the function.
This function could then be invoked from a host environment, say a Java program,
and the data will be consumed. That Java or UI developer will have no idea
how the data set was constructed, and why should they care?
*/
CREATE OR REPLACE FUNCTION lotsa_names_cv (
base_name_in IN VARCHAR2
, count_in IN INTEGER
)
RETURN sys_refcursor
IS
retval sys_refcursor;
BEGIN
OPEN retval FOR
SELECT COLUMN_VALUE
FROM TABLE ( lotsa_names ( base_name_in, count_in )) names;
RETURN retval;
END lotsa_names_cv;
/

DECLARE
l_names_cur sys_refcursor;
l_name VARCHAR2 ( 32767 );
BEGIN
l_names_cur := lotsa_names_cv ( 'Steven', 100 );
LOOP
FETCH l_names_cur INTO l_name;
EXIT WHEN l_names_cur%NOTFOUND;
DBMS_OUTPUT.put_line ( 'Name = ' || l_name );
END LOOP;
CLOSE l_names_cur;
END;
/