package abstract_sql

import (
	"context"
	"database/sql"
	"fmt"
	"strings"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)
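
// SqlGenerator supplies the dialect-specific SQL statements used by
// AbstractSqlStore, so each backing database (for example MySQL or Postgres)
// only needs to provide its own query templates for the shared logic below.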
type SqlGenerator interface {
	GetSqlInsert(bucket string) string
	GetSqlUpdate(bucket string) string
	GetSqlFind(bucket string) string
	GetSqlDelete(bucket string) string
	GetSqlDeleteFolderChildren(bucket string) string
	GetSqlListExclusive(bucket string) string
	GetSqlListInclusive(bucket string) string
	GetSqlCreateTable(bucket string) string
	GetSqlDropTable(bucket string) string
}
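
// AbstractSqlStore implements filer storage on top of any database/sql
// backend. When SupportBucketTable is enabled, entries under /buckets/<name>/
// live in a per-bucket table; dbs caches which of those tables already exist.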
type AbstractSqlStore struct {
	SqlGenerator
	DB                 *sql.DB
	SupportBucketTable bool
	dbs                map[string]bool
	dbsLock            sync.Mutex
}
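
// OnBucketCreation and OnBucketDeletion keep the cached table map in sync
// when buckets are created or deleted elsewhere in the filer.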
func (store *AbstractSqlStore) OnBucketCreation(bucket string) {
	store.dbsLock.Lock()
	defer store.dbsLock.Unlock()

	if store.dbs == nil {
		return
	}
	store.dbs[bucket] = true
}

func (store *AbstractSqlStore) OnBucketDeletion(bucket string) {
	store.dbsLock.Lock()
	defer store.dbsLock.Unlock()

	if store.dbs == nil {
		return
	}
	delete(store.dbs, bucket)
}

const (
	DEFAULT_TABLE = "filemeta"
)
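
// TxOrDB is the subset of *sql.Tx and *sql.DB used here, so every operation
// can run either inside or outside a transaction.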
type TxOrDB interface {
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
}
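
// BeginTransaction opens a read-committed transaction and stores it in the
// returned context under the "tx" key; CommitTransaction and
// RollbackTransaction look it up from the same key.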
func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) {
	tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{
		Isolation: sql.LevelReadCommitted,
		ReadOnly:  false,
	})
	if err != nil {
		return ctx, err
	}

	return context.WithValue(ctx, "tx", tx), nil
}

func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error {
	if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
		return tx.Commit()
	}
	return nil
}

func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
	if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
		return tx.Rollback()
	}
	return nil
}
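
// getTxOrDB returns the transaction from the context (if one was started) or
// the plain DB, and resolves which table and bucket-relative path to use:
// for /buckets/<bucket>/... paths with bucket tables enabled it switches to
// the per-bucket table, creating it on first use; otherwise it falls back to
// DEFAULT_TABLE and the full path.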
func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) {

	shortPath = fullpath
	bucket = DEFAULT_TABLE

	if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
		txOrDB = tx
	} else {
		txOrDB = store.DB
	}

	if !store.SupportBucketTable {
		return
	}

	if !strings.HasPrefix(string(fullpath), "/buckets/") {
		return
	}

	// detect bucket
	bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
	t := strings.Index(bucketAndObjectKey, "/")
	if t < 0 && !isForChildren {
		return
	}
	bucket = bucketAndObjectKey
	shortPath = "/"
	if t > 0 {
		bucket = bucketAndObjectKey[:t]
		shortPath = util.FullPath(bucketAndObjectKey[t:])
	}

	if isValidBucket(bucket) {
		store.dbsLock.Lock()
		defer store.dbsLock.Unlock()

		if store.dbs == nil {
			store.dbs = make(map[string]bool)
		}

		if _, found := store.dbs[bucket]; !found {
			if err = store.CreateTable(ctx, bucket); err == nil {
				store.dbs[bucket] = true
			}
		}
	}

	return
}
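
// InsertEntry encodes the entry metadata and tries a plain INSERT first; if
// that fails, it assumes a duplicate-key conflict and retries as an UPDATE,
// so the operation behaves like an upsert.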
func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
	}

	dir, name := shortPath.DirAndName()
	meta, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
	}

	if len(entry.Chunks) > 50 {
		meta = util.MaybeGzipData(meta)
	}

	res, err := db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta)
	if err == nil {
		return
	}

	if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
		// return fmt.Errorf("insert: %s", err)
		// skip this since the error can be in a different language
	}

	// now the insert failed possibly due to duplication constraints
	glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)

	res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
	if err != nil {
		return fmt.Errorf("upsert %s: %s", entry.FullPath, err)
	}

	_, err = res.RowsAffected()
	if err != nil {
		return fmt.Errorf("upsert %s but no rows affected: %s", entry.FullPath, err)
	}

	return nil
}
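
// UpdateEntry overwrites the stored metadata of an existing entry.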
func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
	}

	dir, name := shortPath.DirAndName()
	meta, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
	}

	res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
	if err != nil {
		return fmt.Errorf("update %s: %s", entry.FullPath, err)
	}

	_, err = res.RowsAffected()
	if err != nil {
		return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err)
	}

	return nil
}
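
// FindEntry looks up a single entry by directory hash, name, and directory,
// and decodes its metadata; a missing row is reported as filer_pb.ErrNotFound.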
func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
	if err != nil {
		return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
	}

	dir, name := shortPath.DirAndName()
	row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir)

	var data []byte
	if err := row.Scan(&data); err != nil {
		if err == sql.ErrNoRows {
			return nil, filer_pb.ErrNotFound
		}
		return nil, fmt.Errorf("find %s: %v", fullpath, err)
	}

	entry := &filer.Entry{
		FullPath: fullpath,
	}
	if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
	}

	return entry, nil
}
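
// DeleteEntry removes the row for a single entry.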
func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", fullpath, err)
	}

	dir, name := shortPath.DirAndName()

	res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir)
	if err != nil {
		return fmt.Errorf("delete %s: %s", fullpath, err)
	}

	_, err = res.RowsAffected()
	if err != nil {
		return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err)
	}

	return nil
}
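
// DeleteFolderChildren removes everything below a directory. When the
// directory is a bucket root and bucket tables are enabled, it drops the
// bucket's table instead of deleting rows one by one.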
func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true)
	if err != nil {
		return fmt.Errorf("findDB %s : %v", fullpath, err)
	}

	if isValidBucket(bucket) && shortPath == "/" {
		if err = store.deleteTable(ctx, bucket); err == nil {
			store.dbsLock.Lock()
			delete(store.dbs, bucket)
			store.dbsLock.Unlock()
			return nil
		} else {
			return err
		}
	}

	// the directory argument must be the same bucket-relative shortPath that
	// was hashed above, otherwise rows in per-bucket tables never match
	res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
	if err != nil {
		return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
	}

	_, err = res.RowsAffected()
	if err != nil {
		return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
	}

	return nil
}
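
// ListDirectoryPrefixedEntries streams the entries of a directory whose names
// match the given prefix, calling eachEntryFunc for each decoded entry until
// it returns false, and returns the last file name seen for pagination.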
func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
	if err != nil {
		return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
	}

	sqlText := store.GetSqlListExclusive(bucket)
	if includeStartFile {
		sqlText = store.GetSqlListInclusive(bucket)
	}

	rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1)
	if err != nil {
		return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		var data []byte
		if err = rows.Scan(&name, &data); err != nil {
			glog.V(0).Infof("scan %s : %v", dirPath, err)
			return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
		}
		lastFileName = name

		entry := &filer.Entry{
			FullPath: util.NewFullPath(string(dirPath), name),
		}
		if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
			glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
			return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
		}

		if !eachEntryFunc(entry) {
			break
		}
	}

	return lastFileName, nil
}
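
// ListDirectoryEntries lists a directory without a name prefix by delegating
// to ListDirectoryPrefixedEntries, forwarding the caller's eachEntryFunc.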
func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
	return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
}

func (store *AbstractSqlStore) Shutdown() {
	store.DB.Close()
}
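
// isValidBucket reports whether the name refers to a real per-bucket table
// rather than the shared default table.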
func isValidBucket(bucket string) bool {
	return bucket != DEFAULT_TABLE && bucket != ""
}
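
// CreateTable and deleteTable manage per-bucket tables; both are no-ops when
// the store keeps everything in the single shared table.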
func (store *AbstractSqlStore) CreateTable(ctx context.Context, bucket string) error {
	if !store.SupportBucketTable {
		return nil
	}
	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket))
	return err
}

func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error {
	if !store.SupportBucketTable {
		return nil
	}
	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket))
	return err
}