[linux-yocto] [PATCH 09/65] mmc: add owl mmc driver
Jiang Lu
lu.jiang at windriver.com
Wed Dec 21 01:16:10 PST 2016
From: wurui <wurui at actions-semi.com>
commit cd7dc353af74e7f78b94dba40dafc33dffe8aeec from
https://github.com/xapp-le/kernel.git
Change-Id: I16e4d0e0b2d040108185703f3821e884b97f1a66
---
drivers/mmc/card/Kconfig | 24 +-
drivers/mmc/card/Makefile | 15 +-
drivers/mmc/card/block.c | 22 +-
drivers/mmc/card/card_proc.c | 513 ++++
drivers/mmc/card/tsd_block.c | 4985 +++++++++++++++++++++++++++++++++++
drivers/mmc/card/tsd_block.h | 203 ++
drivers/mmc/card/tsd_queue.c | 600 +++++
drivers/mmc/card/tsd_queue.h | 87 +
drivers/mmc/core/bus.c | 113 +-
drivers/mmc/core/core.c | 37 +-
drivers/mmc/core/host.c | 24 +-
drivers/mmc/core/sd.c | 22 +
drivers/mmc/core/sdio.c | 38 +
drivers/mmc/core/sdio_io.c | 3 +
drivers/mmc/host/Kconfig | 9 +
drivers/mmc/host/Makefile | 2 +
drivers/mmc/host/dw_mmc.h | 2 +-
drivers/mmc/host/gl520x_dma.h | 143 +
drivers/mmc/host/gl520x_mmc.c | 2149 +++++++++++++++
drivers/mmc/host/gl520x_mmc.h | 408 +++
drivers/mmc/host/gl520x_wifi_test.c | 57 +
drivers/mmc/host/gl520x_wifi_test.h | 7 +
drivers/mmc/host/wlan_device.c | 215 ++
drivers/mmc/host/wlan_device.h | 10 +
drivers/mmc/host/wlan_driver.c | 185 ++
drivers/mmc/host/wlan_plat_data.h | 23 +
include/linux/mmc/core.h | 1 +
include/linux/mmc/host.h | 14 +
28 files changed, 9897 insertions(+), 14 deletions(-)
mode change 100644 => 100755 drivers/mmc/card/Kconfig
mode change 100644 => 100755 drivers/mmc/card/Makefile
mode change 100644 => 100755 drivers/mmc/card/block.c
create mode 100755 drivers/mmc/card/card_proc.c
create mode 100755 drivers/mmc/card/tsd_block.c
create mode 100755 drivers/mmc/card/tsd_block.h
create mode 100755 drivers/mmc/card/tsd_queue.c
create mode 100755 drivers/mmc/card/tsd_queue.h
mode change 100644 => 100755 drivers/mmc/core/bus.c
mode change 100644 => 100755 drivers/mmc/core/core.c
mode change 100644 => 100755 drivers/mmc/core/host.c
mode change 100644 => 100755 drivers/mmc/core/sd.c
mode change 100644 => 100755 drivers/mmc/host/Kconfig
mode change 100644 => 100755 drivers/mmc/host/Makefile
mode change 100644 => 100755 drivers/mmc/host/dw_mmc.h
create mode 100755 drivers/mmc/host/gl520x_dma.h
create mode 100755 drivers/mmc/host/gl520x_mmc.c
create mode 100755 drivers/mmc/host/gl520x_mmc.h
create mode 100755 drivers/mmc/host/gl520x_wifi_test.c
create mode 100755 drivers/mmc/host/gl520x_wifi_test.h
create mode 100755 drivers/mmc/host/wlan_device.c
create mode 100755 drivers/mmc/host/wlan_device.h
create mode 100755 drivers/mmc/host/wlan_driver.c
create mode 100755 drivers/mmc/host/wlan_plat_data.h
mode change 100644 => 100755 include/linux/mmc/core.h
mode change 100644 => 100755 include/linux/mmc/host.h
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
old mode 100644
new mode 100755
index 5562308..cbf7109
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -13,12 +13,28 @@ config MMC_BLOCK
This provides a block device driver, which you can use to
mount the filesystem. Almost everyone wishing MMC support
should say Y or M here.
-
+config TSD_MMC_BLOCK
+ tristate "ACTION TSD MMC block device driver"
+ depends on BLOCK
+ help
+ Say Y here to enable the tsd MMC block device driver support.
+ This provides a tsd block device driver, which you can use to
+ mount the filesystem. Almost everyone wishing MMC support
+ should say Y or M here.
+
+config CARD_TO_CARD_MMC_BLOCK
+ tristate "ACTION CARD_TO_CARD MMC block device driver"
+ depends on BLOCK
+ help
+ Say Y here to enable the MMC block device driver support.
+ This provides a card to card block device driver, which you can use to
+ mount the filesystem. Almost everyone wishing MMC support
+ should say Y or M here.
config MMC_BLOCK_MINORS
int "Number of minors per block device"
depends on MMC_BLOCK
range 4 256
- default 8
+ default 16
help
Number of minors per block device. One is needed for every
partition on the disk (plus one for the whole disk).
@@ -27,10 +43,10 @@ config MMC_BLOCK_MINORS
of supported block devices will be limited to 256 divided
by this number.
- Default is 8 to be backwards compatible with previous
+ Default is 16 to be backwards compatible with previous
hardwired device numbering.
- If unsure, say 8 here.
+ If unsure, say 16 here.
config MMC_BLOCK_BOUNCE
bool "Use bounce buffer for simple hosts"
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
old mode 100644
new mode 100755
index c73b406..58a9660
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -2,9 +2,16 @@
# Makefile for MMC/SD card drivers
#
-obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
-mmc_block-objs := block.o queue.o
-obj-$(CONFIG_MMC_TEST) += mmc_test.o
+obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
+mmc_block-objs := block.o queue.o
-obj-$(CONFIG_SDIO_UART) += sdio_uart.o
+
+obj-$(CONFIG_TSD_MMC_BLOCK) += tsd_mmc_block.o
+tsd_mmc_block-objs := tsd_block.o tsd_queue.o card_proc.o
+
+obj-$(CONFIG_CARD_TO_CARD_MMC_BLOCK) += card_to_card.o
+card_to_card-objs := tsd_block.o tsd_queue.o card_proc.o
+
+obj-$(CONFIG_MMC_TEST) += mmc_test.o
+obj-$(CONFIG_SDIO_UART) += sdio_uart.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
old mode 100644
new mode 100755
index 7435b4c..636ecee
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -41,10 +41,12 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
+#include <mach/bootdev.h>
#include <asm/uaccess.h>
#include "queue.h"
+#include "./../host/gl520x_mmc.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
@@ -1117,7 +1119,10 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
int err;
+ err= sd_mmc_reinit(host);
+ return err;
+#if 0
if (md->reset_done & type)
return -EEXIST;
@@ -1140,6 +1145,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
}
}
return err;
+#endif
}
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
@@ -2215,7 +2221,10 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
{
struct mmc_blk_data *md;
int devidx, ret;
+ int boot_dev;
+ struct gl520xmmc_host *hcd;
+ hcd = mmc_priv(card->host);
devidx = find_first_zero_bit(dev_use, max_devices);
if (devidx >= max_devices)
return ERR_PTR(-ENOSPC);
@@ -2234,8 +2243,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
* index anymore so we keep track of a name index.
*/
if (!subname) {
- md->name_idx = find_first_zero_bit(name_use, max_devices);
+ //md->name_idx = find_first_zero_bit(name_use, max_devices);
+ __set_bit(0, name_use);
+ boot_dev = owl_get_boot_dev();
+ if(boot_dev > 0 && (boot_dev - OWL_BOOTDEV_SD0 == hcd->id)){
+ md->name_idx = 0;
+ } else {
+ md->name_idx = find_first_zero_bit(name_use, max_devices);
+ }
__set_bit(md->name_idx, name_use);
+
+ printk("## md->name_idx: %d\n", md->name_idx);
} else
md->name_idx = ((struct mmc_blk_data *)
dev_to_disk(parent)->private_data)->name_idx;
@@ -2683,7 +2701,7 @@ static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
static struct mmc_driver mmc_driver = {
.drv = {
- .name = "mmcblk",
+ .name = "sd_card",
.pm = &mmc_blk_pm_ops,
},
.probe = mmc_blk_probe,
diff --git a/drivers/mmc/card/card_proc.c b/drivers/mmc/card/card_proc.c
new file mode 100755
index 0000000..bd710e2
--- /dev/null
+++ b/drivers/mmc/card/card_proc.c
@@ -0,0 +1,513 @@
+#include <linux/types.h>
+#include <linux/semaphore.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <mach/bootdev.h>
+
+
+extern unsigned int card_total_size;
+
+extern int tSD_single_blk_test(unsigned start, unsigned blocks);
+extern int __do_adfu_read(unsigned start, unsigned nsector, void *buf);
+extern int __do_adfu_write(unsigned start, unsigned nsector, void * buf);
+
+extern void NAND_ShowMiscInfoAll(void);
+
+extern int NAND_WriteMiscInfo(int type, char * buf, int size);
+extern int NAND_GetMiscInfo(int type, char * buf, int size);
+
+extern unsigned int card_total_size;
+unsigned int tSD_get_logic_cat(void);
+static ssize_t card_proc_storage_type_read (struct file *file,
+ char __user *buffer, size_t count, loff_t * ppos);
+
+static ssize_t card_proc_phy_cap_read (struct file *file,
+ char __user *buffer, size_t count, loff_t * ppos);
+
+static ssize_t card_proc_logic_cap_read (struct file *file,
+ char __user *buffer, size_t count, loff_t * ppos) ;
+
+static ssize_t card_proc_info_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t * ppos);
+
+
+#define CARD_DIR_NAME "mmc"
+#define STORAGE_TYPE_NAME "storage_type"
+#define CARD_LOGIC_FILE_NAME "logic_cap"
+#define CARD_PHY_FILE_NAME "phy_cap"
+#define CARD_INFO "mmc_info"
+
+static struct proc_dir_entry *proc_card = NULL;
+static struct proc_dir_entry *proc_storage_type = NULL;
+static struct proc_dir_entry *proc_card_logic_info = NULL;
+static struct proc_dir_entry *proc_card_cap = NULL;
+static struct proc_dir_entry *proc_card_info = NULL;
+
+#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
+ || ('a' <= (c) && (c) <= 'f') \
+ || ('A' <= (c) && (c) <= 'F'))
+
+#define isdigit(c) ('0' <= (c) && (c) <= '9')
+#define islower(c) ('a' <= (c) && (c) <= 'z')
+#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
+
+#define my_isspace(c) (c == ' ' || c == '\t' || c == 10 || c == 13 || c == 0)
+#define isspace(c) (c == ' ' || c == '\t' || c == 10 || c == 13 || c == 0)
+#define TOLOWER(x) ((x) | 0x20)
+
+
+static const struct file_operations proc_ops[] = {
+ {
+ .owner = THIS_MODULE,
+
+ .read = card_proc_logic_cap_read,
+ },
+ {
+ .owner = THIS_MODULE,
+ .read = card_proc_phy_cap_read,
+ },
+ {
+ .owner = THIS_MODULE,
+ .read = card_proc_storage_type_read,
+ },
+
+ {
+ .owner = THIS_MODULE,
+ .write = card_proc_info_write,
+ },
+
+};
+
+
+
+
+
+static int my_atoi(char *psz_buf)
+{
+ char *pch = psz_buf;
+ int base = 0;
+ unsigned long long result = 0;
+ unsigned int value;
+
+ while (my_isspace(*pch))
+ pch++;
+
+ if (*pch == '-' || *pch == '+') {
+ base = 10;
+ pch++;
+ } else if (*pch && TOLOWER(pch[strlen(pch) - 1]) == 'h') {
+ base = 16;
+ }
+
+ if (pch[0] == '0') {
+ if (TOLOWER(pch[1]) == 'x' && isxdigit(pch[2]))
+ base = 16;
+ else
+ base = 8;
+ } else {
+ base = 10;
+ }
+
+ if (base == 16 && pch[0] == '0' && TOLOWER(pch[1]) == 'x')
+ pch += 2;
+
+ while (isxdigit(*pch)) {
+
+ value = isdigit(*pch) ? *pch - '0' : TOLOWER(*pch) - 'a' + 10;
+ if (value >= base)
+ break;
+ result = result * base + value;
+ pch++;
+ }
+
+ return result;
+}
+
+/******************************************************************************/
+/*!
+* \par Description:
+* dump memory
+* \param[in] startaddr, start address
+* \param[in] size, size of the memory
+* \param[in] showaddr, offset of the memory that we want to show
+* \param[in] show_bytes, show type: 1: byte, 2: short, 4: int
+* \ingroup card_proc.c
+* \par example code
+* \code
+* dump_mem(ret_v0_uncached, 512, 0, 1);
+* \endcode
+*******************************************************************************/
+void dump_mem(void *startaddr, unsigned int size, unsigned int showaddr, unsigned int show_bytes)
+{
+ unsigned int i, count, count_per_line;
+ void *addr = startaddr;
+
+ if ((show_bytes!=1) && (show_bytes!=2) && (show_bytes!=4)){
+ printk("dump_mem: not support mode\n");
+ return;
+ }
+
+ if (((unsigned int)startaddr & (show_bytes -1)) ||\
+ ((unsigned int)size & (show_bytes -1))){
+ printk("dump_mem: startaddr must be align by %d bytes!\n", show_bytes);
+ return;
+ }
+
+ count = size / show_bytes;
+ count_per_line = 16 / show_bytes; // 16 bytes per line
+
+ printk("\nstartaddr %p, size %d, count %d, count_per_line %d\n",
+ startaddr, size, count, count_per_line);
+
+ i = 0;
+ while(i < count){
+
+ if (!(i % count_per_line)){
+ printk("\n%08x: ", showaddr + (i / count_per_line) * 16);
+ }
+
+ switch (show_bytes){
+ case 1:
+ printk("%02x ", *((unsigned char *)addr + i));
+ break;
+ case 2:
+ printk("%04x ", *((unsigned short *)addr + i));
+ break;
+ case 4:
+ printk("%08x ", *((unsigned int *)addr + i));
+ break;
+ default:
+ printk("dump_mem: not support mode\n");
+
+ return;
+ }
+
+ i++;
+ }
+ printk("\n");
+}
+
+
+void init_mem(unsigned char * buf, int seq, int size)
+{
+ int i;
+ for(i=0;i<size;i++){
+ buf[i] = seq++;
+ }
+
+}
+
+
+static ssize_t card_proc_storage_type_read (struct file *file,
+ char __user *buffer, size_t count, loff_t * ppos)
+{
+
+ int len;
+ int ret;
+ unsigned int boot_dev = 0;
+ char * emmc_buf = "emmc";
+ char * tsd_buf = "tsd";
+ char * nand_buf = "nand";
+
+ boot_dev=owl_get_boot_dev();
+
+ if(*ppos > 0)
+ return 0;
+
+ if(boot_dev == OWL_BOOTDEV_SD2){
+
+ ret = copy_to_user(buffer,emmc_buf,strlen(emmc_buf));
+ len = strlen(emmc_buf);
+ }else if(boot_dev == OWL_BOOTDEV_SD0){
+ ret = copy_to_user(buffer,tsd_buf,strlen(tsd_buf));
+ len = strlen(tsd_buf);
+ }else{
+ ret = copy_to_user(buffer,nand_buf,strlen(nand_buf));
+ len = strlen(nand_buf);
+ }
+
+ if(ret){
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ return len;
+
+}
+
+
+static ssize_t card_proc_phy_cap_read (struct file *file,
+ char __user *buffer, size_t count, loff_t * ppos)
+{
+
+ int len;
+ char buf[32]={0};
+
+ unsigned int total_size; // MB
+
+ if(*ppos > 0)
+ return 0;
+ total_size = card_total_size / 2048 ;
+
+ len = sprintf(buf, "%d\n", total_size);
+ if(copy_to_user(buffer, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
+static ssize_t card_proc_logic_cap_read (struct file *file,
+ char __user *buffer, size_t count, loff_t * ppos)
+{
+ int len;
+ char buf[32]={0};
+ unsigned int logic_size; // MB
+
+ if(*ppos > 0)
+ return 0;
+
+ logic_size = tSD_get_logic_cat()/2048 ;
+
+ len = sprintf(buf, "%d\n", logic_size);
+ if(copy_to_user(buffer, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
+
+static ssize_t card_proc_info_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t * ppos)
+{
+
+ loff_t pos = *ppos;
+ char *buf = NULL;
+ char *card_read_buf = NULL;
+ char *card_write_buf = NULL;
+ char * b = NULL;
+
+ char op;
+ int ret = 0;
+ int i,j;
+
+ int argc,blk_num, card_addr;
+ int param[4]= {-1};
+ char **argv = NULL;
+
+ printk("%s, count: %u\n", __FUNCTION__, count);
+ buf = kzalloc(count+1, GFP_KERNEL);
+ if(buf == NULL){
+ printk("buf alloc error \n");
+ count = 0;
+ goto out4;
+
+ }
+ card_read_buf = kzalloc(512 * 1024 ,GFP_KERNEL);
+ if(card_read_buf == NULL){
+ printk("card_read_buf alloc error \n");
+ count = 0;
+ goto out3;
+
+ }
+
+ card_write_buf = kzalloc(512 * 1024 ,GFP_KERNEL);
+ if(card_write_buf == NULL){
+ printk("card_write_buf alloc error \n");
+ count = 0;
+ goto out2;
+
+ }
+
+ b = kmalloc(512, GFP_KERNEL);
+ if(NULL == b){
+ printk("%s %d:fail malloc buf\n",__FUNCTION__,__LINE__);
+ count = 0;
+ goto out1;
+ }
+
+ ret = copy_from_user(buf, buffer, count);
+ if(ret){
+ printk("%s err!\n", __FUNCTION__);
+ }
+
+ buf[count-1] = 0;
+
+ printk("%s\n", buf);
+
+
+
+ argv = argv_split(GFP_KERNEL, buf, &argc);
+
+ for(i=0,j=0;i<argc;i++){
+ //printk("%d %s %d %d!\n", i, argv[i], my_atoi(argv[i]), argc);
+ char tmp = argv[i][0];
+ if(isdigit(tmp)){
+ param[j++]= my_atoi(argv[i]);
+ }
+ }
+
+
+
+ op=buf[0];
+
+ switch(op){
+ case 'o':
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+ break;
+ case 'r':
+ blk_num = (param[0] != -1) ? param[0]:0;
+ card_addr = (param[1] != -1) ? param[1]:0;
+ printk("card read, blk num: %d, card address: %d\n",blk_num, card_addr);
+
+ __do_adfu_read(card_addr, blk_num, card_read_buf);
+ //void dump_mem(void *startaddr, unsigned int size, unsigned int showaddr, unsigned int show_bytes)
+ dump_mem(card_read_buf, blk_num * 512, 0, 1);
+ break;
+ case 'w':
+ blk_num = (param[0] != -1) ? param[0]:0;
+ card_addr = (param[1] != -1) ? param[1]:0;
+ printk("card read, blk num: %d, card address: %d\n",blk_num, card_addr);
+ init_mem(card_write_buf, 0x0, 512 * 1024);
+ __do_adfu_write(card_addr, blk_num, card_write_buf);
+ break;
+ case 'u':
+
+ init_mem(b, 0x0, 308);
+ NAND_WriteMiscInfo(2,b,308);
+ kfree(b);
+ break;
+ case 's':
+
+ init_mem(b, 0x0, 16);
+ NAND_WriteMiscInfo(0,b,16);
+ kfree(b);
+ break;
+ case 'h': //read the hdcp
+
+ memset(b,0xff,512);
+ NAND_GetMiscInfo(2,b,308);
+ dump_mem(b, 308, 0, 1);
+ kfree(b);
+ break;
+ case 't':
+ memset(b,0xff,512);
+ NAND_GetMiscInfo(0,b,16);
+ dump_mem(b, 16, 0, 1);
+ kfree(b);
+ break;
+ case 'e':
+ printk("card_total_size:0x%x\n", card_total_size);
+
+ __do_adfu_read(card_total_size - 2048 + 1, 1, b);
+ dump_mem(b, 308, 0, 1);
+
+ memset(b,0xff,512);
+ __do_adfu_write(card_total_size - 2048 + 1, 1, b);
+ printk("after memset b(0xff)...\n");
+
+ __do_adfu_read(card_total_size - 2048 + 1, 1, b);
+ dump_mem(b, 308, 0, 1);
+ kfree(b);
+ break;
+ default:
+ printk("=== CARD PROC DEBUG KIT ====\n");
+ break;
+
+
+ }
+
+out1:
+ kfree(card_write_buf);
+out2:
+ kfree(card_read_buf);
+out3:
+ kfree(buf);
+
+out4:
+ if (argv)
+ argv_free(argv);
+ argv = NULL;
+
+ *ppos = pos + count;
+
+ return count;
+}
+
+
+int init_card_proc(void)
+{
+ proc_card = proc_mkdir(CARD_DIR_NAME, NULL);
+ if (!proc_card){
+ return -1;
+ }
+ /* card driver r/w information */
+ proc_card_logic_info= proc_create(CARD_LOGIC_FILE_NAME, 0, proc_card, &proc_ops[0]);
+ if (!proc_card_logic_info) {
+ return -1;
+ }
+
+
+ /* card cap information */
+ proc_card_cap= proc_create(CARD_PHY_FILE_NAME, 0, proc_card, &proc_ops[1]);
+ if (!proc_card_cap) {
+ return -1;
+ }
+
+
+ proc_storage_type= proc_create(STORAGE_TYPE_NAME, 0, proc_card, &proc_ops[2]);
+ if (!proc_storage_type) {
+ return -1;
+ }
+
+
+ proc_card_info= proc_create(CARD_INFO, 0, proc_card, &proc_ops[3]);
+ if (!proc_card_info) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int cleanup_card_proc(void)
+{
+
+ if(proc_card_info){
+ remove_proc_entry(CARD_INFO, proc_card);
+ }
+
+ if(proc_storage_type){
+ remove_proc_entry(STORAGE_TYPE_NAME, proc_card);
+ }
+
+ if(proc_card_cap){
+ remove_proc_entry(CARD_PHY_FILE_NAME, proc_card);
+ }
+
+ if(proc_card_logic_info){
+ remove_proc_entry(CARD_LOGIC_FILE_NAME, proc_card);
+ }
+
+
+ if (proc_card){
+ remove_proc_entry(CARD_DIR_NAME, NULL);
+ }
+
+ return 0;
+}
+
+
+
+
+
+
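A note on the proc interface above: card_proc.c registers /proc/mmc with four entries (storage_type, phy_cap, logic_cap and mmc_info). The first three are read-only and report the boot storage type ("emmc", "tsd" or "nand") and the physical/logical capacity in MB; mmc_info accepts one-letter debug commands such as "r <blocks> <sector>", which reads blocks through __do_adfu_read() and dumps them to the kernel log. A minimal user-space sketch of how these entries could be exercised (illustrative only, not part of the patch; it assumes only the paths and command syntax defined in the code above):

/* Illustrative user-space sketch; the paths and the "r <blocks> <sector>"
 * command syntax are taken from card_proc.c above. */
#include <stdio.h>

int main(void)
{
	char type[16] = "";
	FILE *f = fopen("/proc/mmc/storage_type", "r");

	if (f) {
		fgets(type, sizeof(type), f);	/* "emmc", "tsd" or "nand" */
		fclose(f);
		printf("boot storage: %s\n", type);
	}

	/* Ask the driver to read one 512-byte block at sector 0 and dump it
	 * to the kernel log (visible with dmesg). */
	f = fopen("/proc/mmc/mmc_info", "w");
	if (f) {
		fputs("r 1 0\n", f);
		fclose(f);
	}
	return 0;
}

The other opcodes handled by card_proc_info_write() ('w', 'u', 's', 'h', 't', 'e') exercise the write and misc-info paths in the same way.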
diff --git a/drivers/mmc/card/tsd_block.c b/drivers/mmc/card/tsd_block.c
new file mode 100755
index 0000000..6530b2f
--- /dev/null
+++ b/drivers/mmc/card/tsd_block.c
@@ -0,0 +1,4985 @@
+/*
+ * Block driver for media (i.e., flash cards)
+ *
+ * Copyright 2002 Hewlett-Packard Company
+ * Copyright 2005-2008 Pierre Ossman
+ *
+ * Use consistent with the GNU GPL is permitted,
+ * provided that this copyright notice is
+ * preserved in its entirety in all copies and derived works.
+ *
+ * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+ * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * Many thanks to Alessandro Rubini and Jonathan Corbet!
+ *
+ * Author: Andrew Christian
+ * 28 May 2002
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
+#include <linux/mmc/ioctl.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include <asm/uaccess.h>
+#include <linux/decompress/mm.h>
+#include <mach/secure_storage.h>
+#include <mach/bootdev.h>
+#include "tsd_queue.h"
+#include "tsd_block.h"
+
+#include "../../../../owl/platform/boot/include/address.h"
+#include "../../../../owl/platform/boot/include/afinfo.h"
+
+
+/*
+MODULE_ALIAS("mmc:block");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "mmcblk."
+*/
+
+#define INAND_CMD38_ARG_EXT_CSD 113
+#define INAND_CMD38_ARG_ERASE 0x00
+#define INAND_CMD38_ARG_TRIM 0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+
+#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
+ (req->cmd_flags & REQ_META)) && \
+ (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER 0x01
+#define PACKED_CMD_WR 0x02
+
+static DEFINE_MUTEX(block_mutex);
+
+/*
+ * The defaults come from config options but can be overridden by module
+ * or bootarg options.
+ */
+
+static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
+
+/*
+ * We've only got one major, so number of mmcblk devices is
+ * limited to 256 / number of minors per device.
+ */
+static int max_devices;
+
+/* 256 minors, so at most 256 separate devices */
+static DECLARE_BITMAP(dev_use, 256);
+static DECLARE_BITMAP(name_use, 256);
+
+static int is_for_upgrade = 0; // for test
+module_param(is_for_upgrade, int, S_IRUGO | S_IWUSR);
+static int is_force_format = 0;
+module_param(is_force_format, int, S_IRUGO | S_IWUSR);
+static int card_to_card = 0;
+module_param(card_to_card, int, S_IRUGO | S_IWUSR);
+
+static int tsd_major = 93; // for test
+static char *blkdev_name = "flash";
+
+//#define UPGRADE_DBG
+#ifdef UPGRADE_DBG
+#define UPGRADE_DBG_INF(fmt,args...) printk("%s,%d,"fmt,__FUNCTION__,__LINE__,##args);
+#else
+#define UPGRADE_DBG_INF(fmt,args...) do {} while(0)
+#endif
+
+int unassign_partnum;
+int new_partition_table = 0;
+int partition_inmbr = 0;
+int partition__logic_incard = 0;
+
+struct mmc_card *tSD_card;
+struct tSD_partinfo *tSD_part = NULL;
+partition_info_t *capinfo =NULL;
+
+char *mbrc;
+afinfo_t *p_afinfo = NULL;
+unsigned int card_total_size = 0;
+//unsigned char g_pcba_test_flag = 0; //for pcba test, disable by default.
+
+//int sd2_pcba_test = 1;
+//EXPORT_SYMBOL(sd2_pcba_test);
+
+struct mmc_blk_data tSD_device_md =
+{
+ .name = "tsd_card",
+ .major = 93,
+ .minorbits = 3,
+ .usage = 1,
+ .partitions = NULL,
+};
+
+unsigned int miscinfo_start_addr; // in sectors
+
+#define MISC_INFO_WR 0
+#define MISC_INFO_RD 1
+#define MISC_INFO_SECTERS 2048 // 1M byte
+
+#define DRM_KEY_SIZE 64
+#define HDCP_KEY_SIZE 308 // must be aligned to 2 bytes
+#define SN_SIZE 32
+#define DEVNUM_SIZE 32
+#define EXTSPACE_SIZE 4096 //4
+struct MiscInfoBlk_t MiscInfo = {
+ .die = 0xffff,
+ .sblk = 0xffff,
+ .sblkBak = 0xffff,
+ .TotalSize = sizeof(struct MiscInfoBlk_t) + SN_SIZE + DRM_KEY_SIZE + HDCP_KEY_SIZE \
+ + DEVNUM_SIZE + EXTSPACE_SIZE,
+ .Burn = 0 ,
+
+ .Drm = {
+ .Magic = 0x55,
+ .InfoType = 1,
+ .Size = DRM_KEY_SIZE,
+ .Name = "KEY DRM\0",
+ .Burn = 0 ,
+ },
+ .Hdcp = {
+ .Magic = 0x55,
+ .InfoType = 2,
+ .Size = HDCP_KEY_SIZE,
+ .Name = "KEY HDCP\0",
+ .Burn = 0 ,
+ },
+ .Sn = {
+ .Magic = 0x55,
+ .InfoType = 0,
+ .Size = SN_SIZE,
+ .Name = "SN\0",
+ .Burn = 0 ,
+ },
+ .DevNum = {
+ .Magic = 0x55,
+ .InfoType = 0,
+ .Size = DEVNUM_SIZE,
+ .Name = "DEV NUM\0",
+ .Burn = 0 ,
+ },
+ .ExtSpace = {
+ .Magic = 0x55,
+ .InfoType = 0,
+ .Size = EXTSPACE_SIZE,
+ .Name = "EXTSPACE\0",
+ .Burn = 0 ,
+ },
+
+};
+
+
+
+static struct semaphore miscMutex;
+static void _miscMetuxInit(void)
+{
+ sema_init(&miscMutex, 1);
+}
+
+static void _miscMetuxLock(void)
+{
+ down(&miscMutex);
+}
+
+static void _miscMetuxUnlock(void)
+{
+ up(&miscMutex);
+}
+
+static int do_rw_miscinfo (unsigned int offset, char *buf, int size, int wr_flag);
+static int owl_miscinfo_is_burn(void);
+unsigned int tSD_op_read(unsigned long start, unsigned long nsector,
+ void *buf, struct inode * i);
+unsigned int tSD_op_write(unsigned long start, unsigned long nsector,
+ void *buf, struct inode * i);
+int tSD_pre_data_transfer(unsigned start, unsigned nsector,
+ void *buf, unsigned blksz, int write);
+int tSD_adfu_read(unsigned long start, unsigned long nsector, void *buf,
+ struct uparam * i);
+int tSD_adfu_write(unsigned long start, unsigned long nsector, void *buf,
+ struct uparam * i);
+int __do_adfu_read(unsigned start, unsigned nsector, void *buf);
+int __do_adfu_write(unsigned start, unsigned nsector, void *buf);
+int tSD_data_transfer(struct mmc_card *card, unsigned char *buf,
+ unsigned start, unsigned blocks, unsigned blksz, int write);
+static void tSD_prepare_mrq(struct mmc_card *card, unsigned char *buf,
+ struct mmc_request *mrq, unsigned start, unsigned blocks, unsigned blksz, int write);
+static int tSD_wait_busy(struct mmc_card *card);
+static int tSD_test_busy(struct mmc_command *cmd);
+static int tSD_test_check_result(struct mmc_card *card, struct mmc_request *mrq);
+
+int tSD_single_blk_test(unsigned start, unsigned blocks);
+
+static int tSD_queue_init(struct mmc_blk_data *tSD_device);
+static int tSD_partition_init(struct mmc_blk_data *tSD_device, int part_num);
+static int owl_hdcp_is_burn(void);
+static int tSD_blk_ioctl(struct block_device * bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
+int get_boot_media_info(unsigned int arg);
+int boot_operation(unsigned int arg, unsigned int cmd);
+int boot_phy_op_entry(boot_op_t * op, unsigned int cmd);
+unsigned convert_to_sector(unsigned blk, unsigned page);
+static unsigned int tSD_prep_sector(struct request *req);
+
+int init_board_cfg(void);
+int get_afi_configuration(void);
+partition_info_t *GetMbrFromUser(void);
+unsigned int GetAfiFromUser(afinfo_t * afinfo);
+unsigned int tSD_get_logic_cat(void);
+int init_tSD_part_myself(void);
+static unsigned int get_cap_offset(int part);
+unsigned int UpdateMbrToUsr(partition_info_t * partition_info_tbl, afinfo_t *p_afi);
+unsigned int UpdateMbrFromPhyToUsr(unsigned int *p_nand_part, mbr_info_t *p_mbr_info);
+unsigned int ReadAfinfo(void);
+unsigned int calCRC(unsigned char *buf, unsigned int length, unsigned char nBytes);
+int NAND_GetMiscInfo(int type, char *buf, int size);
+int NAND_WriteMiscInfo(int type, char *buf, int size);
+void NAND_InitMiscInfo(void);
+int handle_misc_info(unsigned int arg);
+int calculate_part_num(void);
+int owl_set_carddev_match_name(void);
+
+
+
+
+ // external functions defined in adfus.ko
+typedef unsigned int (*func_t)(unsigned int *p_nand_part, mbr_info_t *p_mbr_info);
+typedef void (*func_t1)(void);
+typedef int (*func_t4)(unsigned long, unsigned long , void *, struct uparam *);
+extern func_t AdfuUpdateMbrFromPhyToUsr;
+extern func_t1 adfu_flush_nand_cache;
+extern func_t4 adfus_nand_read;
+extern func_t4 adfus_nand_write;
+
+
+extern void dump_mem(void *startaddr, unsigned int size,
+ unsigned int showaddr, unsigned int show_bytes);
+
+
+static DEFINE_MUTEX(open_lock);
+
+enum {
+ MMC_PACKED_NR_IDX = -1,
+ MMC_PACKED_NR_ZERO,
+ MMC_PACKED_NR_SINGLE,
+};
+
+module_param(perdev_minors, int, 0444);
+MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+
+struct secure_storage card_secure =
+{
+ .name = "card",
+ .read_data = NAND_GetMiscInfo,
+ .write_data = NAND_WriteMiscInfo,
+};
+
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md);
+static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+ struct mmc_packed *packed = mqrq->packed;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ packed->nr_entries = MMC_PACKED_NR_ZERO;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+ packed->retries = 0;
+ packed->blocks = 0;
+}
+
+#if 0
+static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+{
+ struct mmc_blk_data *md;
+
+ mutex_lock(&open_lock);
+ md = disk->private_data;
+ if (md && md->usage == 0)
+ md = NULL;
+ if (md)
+ md->usage++;
+ mutex_unlock(&open_lock);
+
+ return md;
+}
+
+#endif
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+ int devidx = disk->first_minor / perdev_minors;
+ return devidx;
+}
+
+#if 0
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+ mutex_lock(&open_lock);
+ md->usage--;
+ if (md->usage == 0) {
+ int devidx = mmc_get_devidx(md->disk);
+ blk_cleanup_queue(md->queue.queue);
+
+ __clear_bit(devidx, dev_use);
+
+ put_disk(md->disk);
+ kfree(md);
+ }
+ mutex_unlock(&open_lock);
+}
+#endif
+#if 0
+static void tSD_blk_put(struct mmc_blk_data *tSD_device)
+{
+ int i;
+ mutex_lock(&open_lock);
+ tSD_device->usage--;
+ if(tSD_device->usage == 0){
+ blk_cleanup_queue(tSD_device->squeue.queue);
+ blk_cleanup_queue(tSD_device->dqueue.queue);
+ blk_cleanup_queue(tSD_device->uqueue.queue);
+
+ for(i = 0; i < partition__logic_incard; i++)
+ {
+ put_disk(tSD_device->partitions[i].disk);
+ }
+ }
+ mutex_unlock(&open_lock);
+}
+#endif
+#if 0
+static ssize_t power_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ int locked = 0;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ locked = 2;
+ else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ locked = 1;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
+ return ret;
+}
+
+static ssize_t power_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md, *part_md;
+ struct mmc_card *card;
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set))
+ return -EINVAL;
+
+ if (set != 1)
+ return count;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ card = md->queue.card;
+
+ mmc_claim_host(card->host);
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+ card->ext_csd.boot_ro_lock |
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
+ else
+ card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+
+ mmc_release_host(card->host);
+
+ if (!ret) {
+ pr_info("%s: Locking boot partition ro until next power on\n",
+ md->disk->disk_name);
+ set_disk_ro(md->disk, 1);
+
+ list_for_each_entry(part_md, &md->part, part)
+ if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+ pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
+ set_disk_ro(part_md->disk, 1);
+ }
+ }
+
+ mmc_blk_put(md);
+ return count;
+}
+
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+ ret = snprintf(buf, PAGE_SIZE, "%d",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ char *end;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ unsigned long set = simple_strtoul(buf, &end, 0);
+ if (end == buf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ set_disk_ro(dev_to_disk(dev), set || md->read_only);
+ ret = count;
+out:
+ mmc_blk_put(md);
+ return ret;
+}
+
+static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ mutex_lock(&block_mutex);
+ if (md) {
+ if (md->usage == 2)
+ check_disk_change(bdev);
+ ret = 0;
+
+ if ((mode & FMODE_WRITE) && md->read_only) {
+ mmc_blk_put(md);
+ ret = -EROFS;
+ }
+ }
+ mutex_unlock(&block_mutex);
+
+ return ret;
+}
+#endif
+static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
+{
+ return ;
+#if 0
+ struct mmc_blk_data *md = disk->private_data;
+
+ mutex_lock(&block_mutex);
+ mmc_blk_put(md);
+ mutex_unlock(&block_mutex);
+#endif
+}
+
+
+static int
+mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ //geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ //geo->heads = 4;
+ //geo->sectors = 16;
+ return 0;
+}
+
+struct mmc_blk_ioc_data {
+ struct mmc_ioc_cmd ic;
+ unsigned char *buf;
+ u64 buf_bytes;
+};
+
+#if 0
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ struct mmc_ioc_cmd __user *user)
+{
+ struct mmc_blk_ioc_data *idata;
+ int err;
+
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+ err = -EFAULT;
+ goto idata_err;
+ }
+
+ idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+ if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+ err = -EOVERFLOW;
+ goto idata_err;
+ }
+
+ if (!idata->buf_bytes)
+ return idata;
+
+ idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+ if (!idata->buf) {
+ err = -ENOMEM;
+ goto idata_err;
+ }
+
+ if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+ idata->ic.data_ptr, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto copy_err;
+ }
+
+ return idata;
+
+copy_err:
+ kfree(idata->buf);
+idata_err:
+ kfree(idata);
+out:
+ return ERR_PTR(err);
+}
+
+static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
+ u32 retries_max)
+{
+ int err;
+ u32 retry_count = 0;
+
+ if (!status || !retries_max)
+ return -EINVAL;
+
+ do {
+ err = get_card_status(card, status, 5);
+ if (err)
+ break;
+
+ if (!R1_STATUS(*status) &&
+ (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
+ break; /* RPMB programming operation complete */
+
+ /*
+ * Rechedule to give the MMC device a chance to continue
+ * processing the previous command without being polled too
+ * frequently.
+ */
+ usleep_range(1000, 5000);
+ } while (++retry_count < retries_max);
+
+ if (retry_count == retries_max)
+ err = -EPERM;
+
+ return err;
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ struct mmc_ioc_cmd __user *ic_ptr)
+{
+ struct mmc_blk_ioc_data *idata;
+ struct mmc_blk_data *md;
+ struct mmc_card *card;
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {NULL};
+ struct scatterlist sg;
+ int err;
+ int is_rpmb = false;
+ u32 status = 0;
+
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
+ idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ if (IS_ERR(idata))
+ return PTR_ERR(idata);
+
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md) {
+ err = -EINVAL;
+ goto cmd_err;
+ }
+
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ is_rpmb = true;
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_done;
+ }
+
+ cmd.opcode = idata->ic.opcode;
+ cmd.arg = idata->ic.arg;
+ cmd.flags = idata->ic.flags;
+
+ if (idata->buf_bytes) {
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = idata->ic.blksz;
+ data.blocks = idata->ic.blocks;
+
+ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+ if (idata->ic.write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+
+ /* Allow overriding the timeout_ns for empirical tuning. */
+ if (idata->ic.data_timeout_ns)
+ data.timeout_ns = idata->ic.data_timeout_ns;
+
+ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Pretend this is a data transfer and rely on the
+ * host driver to compute timeout. When all host
+ * drivers support cmd.cmd_timeout for R1B, this
+ * can be changed to:
+ *
+ * mrq.data = NULL;
+ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+ */
+ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+ }
+
+ mrq.data = &data;
+ }
+
+ mrq.cmd = &cmd;
+
+ mmc_claim_host(card->host);
+
+ err = mmc_blk_part_switch(card, md);
+ if (err)
+ goto cmd_rel_host;
+
+ if (idata->ic.is_acmd) {
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ goto cmd_rel_host;
+ }
+
+ if (is_rpmb) {
+ err = mmc_set_blockcount(card, data.blocks,
+ idata->ic.write_flag & (1 << 31));
+ if (err)
+ goto cmd_rel_host;
+ }
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+ err = cmd.error;
+ goto cmd_rel_host;
+ }
+ if (data.error) {
+ dev_err(mmc_dev(card->host), "%s: data error %d\n",
+ __func__, data.error);
+ err = data.error;
+ goto cmd_rel_host;
+ }
+
+ /*
+ * According to the SD specs, some commands require a delay after
+ * issuing the command.
+ */
+ if (idata->ic.postsleep_min_us)
+ usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+ if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+
+ if (!idata->ic.write_flag) {
+ if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+ idata->buf, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+ }
+
+ if (is_rpmb) {
+ /*
+ * Ensure RPMB command has completed by polling CMD13
+ * "Send Status".
+ */
+ err = ioctl_rpmb_card_status_poll(card, &status, 5);
+ if (err)
+ dev_err(mmc_dev(card->host),
+ "%s: Card Status=0x%08X, error %d\n",
+ __func__, status, err);
+ }
+
+cmd_rel_host:
+ mmc_release_host(card->host);
+
+cmd_done:
+ mmc_blk_put(md);
+cmd_err:
+ kfree(idata->buf);
+ kfree(idata);
+ return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -EINVAL;
+ if (cmd == MMC_IOC_CMD)
+ ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+ return ret;
+}
+
+
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+#endif
+static void tSD_op_flush(void)
+{
+
+}
+
+static const struct block_device_operations mmc_bdops = {
+// .open = mmc_blk_open,
+ .release = mmc_blk_release,
+ .getgeo = mmc_blk_getgeo,
+ .owner = THIS_MODULE,
+// .ioctl = mmc_blk_ioctl,
+ .ioctl = tSD_blk_ioctl,
+ .blk_read = tSD_op_read,
+ .blk_write = tSD_op_write,
+ .flush_disk_cache = tSD_op_flush,
+#ifdef CONFIG_COMPAT
+// .compat_ioctl = mmc_blk_compat_ioctl,
+#endif
+};
+
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ int ret;
+ struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+
+ if (main_md->part_curr == md->part_type)
+ return 0;
+
+ if (mmc_card_mmc(card)) {
+ u8 part_config = card->ext_csd.part_config;
+
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ part_config |= md->part_type;
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG, part_config,
+ card->ext_csd.part_time);
+ if (ret)
+ return ret;
+
+ card->ext_csd.part_config = part_config;
+ }
+
+ main_md->part_curr = md->part_type;
+ return 0;
+}
+
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+ int err;
+ u32 result;
+ __be32 *blocks;
+
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+
+ struct scatterlist sg;
+
+ cmd.opcode = MMC_APP_CMD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err)
+ return (u32)-1;
+ if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
+ return (u32)-1;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = 4;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ blocks = kmalloc(4, GFP_KERNEL);
+ if (!blocks)
+ return (u32)-1;
+
+ sg_init_one(&sg, blocks, 4);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ result = ntohl(*blocks);
+ kfree(blocks);
+
+ if (cmd.error || data.error)
+ result = (u32)-1;
+
+ return result;
+}
+
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(card->host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+#define ERR_NOMEDIUM 3
+#define ERR_RETRY 2
+#define ERR_ABORT 1
+#define ERR_CONTINUE 0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ bool status_valid, u32 status)
+{
+ switch (error) {
+ case -EILSEQ:
+ /* response crc error, retry the r/w cmd */
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "response CRC error",
+ name, status);
+ return ERR_RETRY;
+
+ case -ETIMEDOUT:
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "timed out", name, status);
+
+ /* If the status cmd initially failed, retry the r/w cmd */
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
+ return ERR_RETRY;
+ }
+ /*
+ * If it was a r/w cmd crc error, or illegal command
+ * (eg, issued in wrong state) then retry - we should
+ * have corrected the state problem above.
+ */
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
+ return ERR_RETRY;
+ }
+
+ /* Otherwise abort the command */
+ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
+ return ERR_ABORT;
+
+ default:
+ /* We don't understand the error code the driver gave us */
+ pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ req->rq_disk->disk_name, error, status);
+ return ERR_ABORT;
+ }
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state. Essentially, we do this as follows:
+ * - Obtain card status. If the first attempt to obtain card status fails,
+ * the status word will reflect the failed status cmd, not the failed
+ * r/w cmd. If we fail to obtain card status, it suggests we can no
+ * longer communicate with the card.
+ * - Check the card state. If the card received the cmd but there was a
+ * transient problem with the response, it might still be in a data transfer
+ * mode. Try to send it a stop command. If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ * transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ * illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+{
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+ int err, retry;
+
+ if (mmc_card_removed(card))
+ return ERR_NOMEDIUM;
+
+ /*
+ * Try to get card status which indicates both the card state
+ * and why there was no response. If the first attempt fails,
+ * we can't be sure the returned status is for the r/w command.
+ */
+ for (retry = 2; retry >= 0; retry--) {
+ err = get_card_status(card, &status, 0);
+ if (!err)
+ break;
+
+ prev_cmd_status_valid = false;
+ pr_err("%s: error %d sending status command, %sing\n",
+ req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+ }
+
+ /* We couldn't get a response from the card. Give up. */
+ if (err) {
+ /* Check if the card is removed */
+ if (mmc_detect_card_removed(card->host))
+ return ERR_NOMEDIUM;
+ return ERR_ABORT;
+ }
+
+ /* Flag ECC errors */
+ if ((status & R1_CARD_ECC_FAILED) ||
+ (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+ (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+ *ecc_err = 1;
+
+ /* Flag General errors */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if ((status & R1_ERROR) ||
+ (brq->stop.resp[0] & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0], status);
+ *gen_err = 1;
+ }
+
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN.)
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+ R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+ err = send_stop(card, &stop_status);
+ if (err)
+ pr_err("%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, err);
+
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
+ if (err)
+ return ERR_ABORT;
+ if (stop_status & R1_CARD_ECC_FAILED)
+ *ecc_err = 1;
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if (stop_status & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ stop_status);
+ *gen_err = 1;
+ }
+ }
+
+ /* Check for set block count errors */
+ if (brq->sbc.error)
+ return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+ prev_cmd_status_valid, status);
+
+ /* Check for r/w command errors */
+ if (brq->cmd.error)
+ return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+ prev_cmd_status_valid, status);
+
+ /* Data errors */
+ if (!brq->stop.error)
+ return ERR_CONTINUE;
+
+ /* Now for stop errors. These aren't fatal to the transfer. */
+ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->stop.error,
+ brq->cmd.resp[0], status);
+
+ /*
+ * Substitute in our own stop status as this will give the error
+ * state which happened during the execution of the r/w command.
+ */
+ if (stop_status) {
+ brq->stop.resp[0] = stop_status;
+ brq->stop.error = 0;
+ }
+ return ERR_CONTINUE;
+}
+
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+ int type)
+{
+ int err;
+
+ if (md->reset_done & type)
+ return -EEXIST;
+
+ md->reset_done |= type;
+ err = mmc_hw_reset(host);
+ /* Ensure we switch back to the correct partition */
+ if (err != -EOPNOTSUPP) {
+ struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+ int part_err;
+
+ main_md->part_curr = main_md->part_type;
+ part_err = mmc_blk_part_switch(host->card, md);
+ if (part_err) {
+ /*
+ * We have failed to get back into the correct
+ * partition, so we need to abort the whole request.
+ */
+ return -ENODEV;
+ }
+ }
+ return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+ md->reset_done &= ~type;
+}
+
+static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue->card;
+
+ unsigned int from, nr, arg;
+ int err = 0, type = MMC_BLK_DISCARD;
+
+
+ if (!mmc_can_erase(card)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ from = tSD_prep_sector(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_discard(card)){
+ arg = MMC_DISCARD_ARG;
+ }
+
+ else if (mmc_can_trim(card)){
+ arg = MMC_TRIM_ARG;
+ }
+
+ else{
+ arg = MMC_ERASE_ARG;
+ }
+
+retry:
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0);
+ if (err)
+ goto out;
+ }
+ err = mmc_erase(card, from, nr, arg);
+out:
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
+ blk_end_request(req, err, blk_rq_bytes(req));
+
+ return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue->card;
+
+ unsigned int from, nr, arg, trim_arg, erase_arg;
+ int err = 0, type = MMC_BLK_SECDISCARD;
+
+ if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ from = tSD_prep_sector(req);
+ nr = blk_rq_sectors(req);
+
+ /* The sanitize operation is supported at v4.5 only */
+ if (mmc_can_sanitize(card)) {
+ erase_arg = MMC_ERASE_ARG;
+ trim_arg = MMC_TRIM_ARG;
+ } else {
+ erase_arg = MMC_SECURE_ERASE_ARG;
+ trim_arg = MMC_SECURE_TRIM1_ARG;
+ }
+
+ if (mmc_erase_group_aligned(card, from, nr))
+ arg = erase_arg;
+ else if (mmc_can_trim(card))
+ arg = trim_arg;
+ else {
+ err = -EINVAL;
+ goto out;
+ }
+retry:
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_SECURE_TRIM1_ARG ?
+ INAND_CMD38_ARG_SECTRIM1 :
+ INAND_CMD38_ARG_SECERASE,
+ 0);
+ if (err)
+ goto out_retry;
+ }
+
+ err = mmc_erase(card, from, nr, arg);
+ if (err == -EIO)
+ goto out_retry;
+ if (err)
+ goto out;
+
+ if (arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ 0);
+ if (err)
+ goto out_retry;
+ }
+
+ err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+ if (err == -EIO)
+ goto out_retry;
+ if (err)
+ goto out;
+ }
+
+ if (mmc_can_sanitize(card)) {
+// trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1, 0);
+// trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
+ }
+out_retry:
+ if (err && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
+out:
+ blk_end_request(req, err, blk_rq_bytes(req));
+
+ return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ printk("%s,%d\n",__FUNCTION__,__LINE__);
+ return 0;
+#if 0
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int ret = 0;
+
+ ret = mmc_flush_cache(card);
+ if (ret)
+ ret = -EIO;
+
+ blk_end_request_all(req, ret);
+
+ return ret ? 0 : 1;
+#endif
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+ struct mmc_card *card,
+ struct request *req)
+{
+ printk("%s,%d\n",__FUNCTION__,__LINE__);
+#if 0
+ if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+ /* Legacy mode imposes restrictions on transfers. */
+ if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+ brq->data.blocks = 1;
+
+ if (brq->data.blocks > card->ext_csd.rel_sectors)
+ brq->data.blocks = card->ext_csd.rel_sectors;
+ else if (brq->data.blocks < card->ext_csd.rel_sectors)
+ brq->data.blocks = 1;
+ }
+#endif
+}
+
+#define CMD_ERRORS \
+ (R1_OUT_OF_RANGE | /* Command argument out of range */ \
+ R1_ADDRESS_ERROR | /* Misaligned address */ \
+ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
+ R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CC_ERROR | /* Card controller error */ \
+ R1_ERROR) /* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
+ int ecc_err = 0, gen_err = 0;
+
+ /*
+ * sbc.error indicates a problem with the set block count
+ * command. No data will have been transferred.
+ *
+ * cmd.error indicates a problem with the r/w command. No
+ * data will have been transferred.
+ *
+ * stop.error indicates a problem with the stop command. Data
+ * may have been transferred, or may still be transferring.
+ */
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+ return MMC_BLK_ABORT;
+ case ERR_NOMEDIUM:
+ return MMC_BLK_NOMEDIUM;
+ case ERR_CONTINUE:
+ break;
+ }
+ }
+
+ /*
+ * Check for errors relating to the execution of the
+ * initial command - such as address errors. No data
+ * has been transferred.
+ */
+ if (brq->cmd.resp[0] & CMD_ERRORS) {
+ pr_err("%s: r/w command failed, status = %#x\n",
+ req->rq_disk->disk_name, brq->cmd.resp[0]);
+ return MMC_BLK_ABORT;
+ }
+
+ /*
+ * Everything else is either success, or a data error of some
+ * kind. If it was a write, we may have transitioned to
+ * program mode, which we have to wait for it to complete.
+ */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ u32 status;
+ unsigned long timeout;
+
+ /* Check stop command response */
+ if (brq->stop.resp[0] & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0]);
+ gen_err = 1;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
+ do {
+ int err = get_card_status(card, &status, 5);
+ if (err) {
+ pr_err("%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_CMD_ERR;
+ }
+
+ if (status & R1_ERROR) {
+ pr_err("%s: %s: general error sending status command, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ status);
+ gen_err = 1;
+ }
+
+ /* Timeout if the device never becomes ready for data
+ * and never leaves the program state.
+ */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state!"\
+ " %s %s\n", mmc_hostname(card->host),
+ req->rq_disk->disk_name, __func__);
+
+ return MMC_BLK_CMD_ERR;
+ }
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ }
+
+ /* if general error occurs, retry the write operation. */
+ if (gen_err) {
+ pr_warn("%s: retrying write for general error\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY;
+ }
+
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req),
+ brq->cmd.resp[0], brq->stop.resp[0]);
+
+ if (rq_data_dir(req) == READ) {
+ if (ecc_err)
+ return MMC_BLK_ECC_ERR;
+ return MMC_BLK_DATA_ERR;
+ } else {
+ return MMC_BLK_CMD_ERR;
+ }
+ }
+
+ if (!brq->data.bytes_xfered)
+ return MMC_BLK_RETRY;
+
+ if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+ if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+ return MMC_BLK_PARTIAL;
+ else
+ return MMC_BLK_SUCCESS;
+ }
+
+ if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+ return MMC_BLK_PARTIAL;
+
+ return MMC_BLK_SUCCESS;
+}
+
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *req = mq_rq->req;
+ struct mmc_packed *packed = mq_rq->packed;
+ int err, check, status;
+ u8 *ext_csd;
+
+ BUG_ON(!packed);
+
+ packed->retries--;
+ check = mmc_blk_err_check(card, areq);
+ err = get_card_status(card, &status, 0);
+ if (err) {
+ pr_err("%s: error %d sending status command\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if (status & R1_EXCEPTION_EVENT) {
+ ext_csd = kzalloc(512, GFP_KERNEL);
+ if (!ext_csd) {
+ pr_err("%s: unable to allocate buffer for ext_csd\n",
+ req->rq_disk->disk_name);
+ return -ENOMEM;
+ }
+
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+ pr_err("%s: error %d sending ext_csd\n",
+ req->rq_disk->disk_name, err);
+ check = MMC_BLK_ABORT;
+ goto free;
+ }
+
+ if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+ EXT_CSD_PACKED_FAILURE) &&
+ (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_GENERIC_ERROR)) {
+ if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_INDEXED_ERROR) {
+ packed->idx_failure =
+ ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+ check = MMC_BLK_PARTIAL;
+ }
+ pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+ "failure index: %d\n",
+ req->rq_disk->disk_name, packed->nr_entries,
+ packed->blocks, packed->idx_failure);
+ }
+free:
+ kfree(ext_csd);
+ }
+
+ return check;
+}
+
+static unsigned int tSD_prep_sector(struct request *req)
+{
+ struct gendisk * gd;
+ unsigned int part_no;
+ unsigned int start;
+ unsigned int nsector;
+
+ gd = req->rq_disk;
+ part_no = gd->first_minor >> 3;
+ start = blk_rq_pos(req);
+ nsector = blk_rq_sectors(req);
+
+ //printk("%s gendisk[%d] start:0x%x, nsector:0x%x ,get_capacity(gd) high:0x%x get_capacity(gd) low:0x%x!\n",__FUNCTION__,part_no, start, nsector, (unsigned int)(get_capacity(gd)>>32), (unsigned int)get_capacity(gd));
+
+ if(nsector <= (get_capacity(gd) - start))
+ {
+ start += tSD_device_md.partitions[part_no].offset;
+ start += SEC_PHY_BLOCK;
+// UPGRADE_DBG_INF("start = %u, part_no = %d \n", start, part_no);
+ }
+ else
+ {
+ printk("%s err!gendisk[%d] over limit start:0x%x, nsector:0x%x , get_capacity(gd) high:0x%x get_capacity(gd) low:0x%x!\n",__FUNCTION__,part_no, start, nsector, (unsigned int)(get_capacity(gd)>>32), (unsigned int)get_capacity(gd));
+ //return -1;
+ //todo
+ }
+
+ return start;
+}
+
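+/*
+ * Prepare a read/write mmc_blk_request for the current block request. This
+ * follows the usual mmc_blk path except that the command argument is the
+ * sector remapped through tSD_prep_sector() instead of blk_rq_pos() alone.
+ */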
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq)
+{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->data;
+ bool do_data_tag;
+ unsigned int start;
+
+ /*
+ * Reliable writes are used to implement Forced Unit Access and
+ * REQ_META accesses, and are supported only on MMCs.
+ *
+ * XXX: this really needs a good explanation of why REQ_META
+ * is treated special.
+ */
+ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+ (req->cmd_flags & REQ_META)) &&
+ (rq_data_dir(req) == WRITE) &&
+ (md->flags & MMC_BLK_REL_WR);
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+
+ //brq->cmd.arg = blk_rq_pos(req);
+	/* add for mmc partition */
+ start = tSD_prep_sector(req);
+ brq->cmd.arg = start;
+
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq->data.blocks = blk_rq_sectors(req);
+
+ /*
+ * The block layer doesn't support all sector count
+ * restrictions, so we need to be prepared for too big
+ * requests.
+ */
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
+
+ if (brq->data.blocks > 1) {
+ /*
+ * After a read error, we redo the request one sector
+ * at a time in order to accurately determine which
+ * sectors can be read successfully.
+ */
+ if (disable_multi)
+ brq->data.blocks = 1;
+
+ /* Some controllers can't do multiblock reads due to hw bugs */
+ if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
+ rq_data_dir(req) == READ)
+ brq->data.blocks = 1;
+ }
+
+ if (brq->data.blocks > 1 || do_rel_wr) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
+ */
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
+ brq->mrq.stop = &brq->stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq->mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+ if (rq_data_dir(req) == READ) {
+ brq->cmd.opcode = readcmd;
+ brq->data.flags |= MMC_DATA_READ;
+ } else {
+ brq->cmd.opcode = writecmd;
+ brq->data.flags |= MMC_DATA_WRITE;
+ }
+
+ if (do_rel_wr)
+ mmc_apply_rel_rw(brq, card, req);
+
+ /*
+ * Data tag is used only during writing meta data to speed
+ * up write and any subsequent read of this meta data
+ */
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open ended-ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B: Some MMC cards experience perf degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
+ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
+ do_data_tag)) {
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
+ (do_rel_wr ? (1 << 31) : 0) |
+ (do_data_tag ? (1 << 29) : 0);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
+ }
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
+ }
+ brq->data.sg_len = i;
+ }
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(q);
+ unsigned int len, nr_segs = 0;
+
+ do {
+ len = min(hdr_sz, max_seg_sz);
+ hdr_sz -= len;
+ nr_segs++;
+ } while (hdr_sz);
+
+ return nr_segs;
+}
+
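+/*
+ * Try to collect further write requests from the queue into one packed
+ * command. Returns the number of packed entries, or 0 when packing is not
+ * possible and the request must be issued on its own.
+ */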
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
+ struct request *cur = req, *next = NULL;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_queue_req *mqrq = mq->mqrq_cur;
+ bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+ unsigned int req_sectors = 0, phys_segments = 0;
+ unsigned int max_blk_count, max_phys_segs;
+ bool put_back = true;
+ u8 max_packed_rw = 0;
+ u8 reqs = 0;
+
+ if (!(md->flags & MMC_BLK_PACKED_CMD))
+ goto no_packed;
+
+ if ((rq_data_dir(cur) == WRITE) &&
+ mmc_host_packed_wr(card->host))
+ max_packed_rw = card->ext_csd.max_packed_writes;
+
+ if (max_packed_rw == 0)
+ goto no_packed;
+
+ if (mmc_req_rel_wr(cur) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ goto no_packed;
+
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(cur), 8))
+ goto no_packed;
+
+ mmc_blk_clear_packed(mqrq);
+
+ max_blk_count = min(card->host->max_blk_count,
+ card->host->max_req_size >> 9);
+ if (unlikely(max_blk_count > 0xffff))
+ max_blk_count = 0xffff;
+
+ max_phys_segs = queue_max_segments(q);
+ req_sectors += blk_rq_sectors(cur);
+ phys_segments += cur->nr_phys_segments;
+
+ if (rq_data_dir(cur) == WRITE) {
+ req_sectors += mmc_large_sector(card) ? 8 : 1;
+ phys_segments += mmc_calc_packed_hdr_segs(q, card);
+ }
+
+ do {
+ if (reqs >= max_packed_rw - 1) {
+ put_back = false;
+ break;
+ }
+
+ spin_lock_irq(q->queue_lock);
+ next = blk_fetch_request(q);
+ spin_unlock_irq(q->queue_lock);
+ if (!next) {
+ put_back = false;
+ break;
+ }
+
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(next), 8))
+ break;
+
+ if (next->cmd_flags & REQ_DISCARD ||
+ next->cmd_flags & REQ_FLUSH)
+ break;
+
+ if (rq_data_dir(cur) != rq_data_dir(next))
+ break;
+
+ if (mmc_req_rel_wr(next) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ break;
+
+ req_sectors += blk_rq_sectors(next);
+ if (req_sectors > max_blk_count)
+ break;
+
+ phys_segments += next->nr_phys_segments;
+ if (phys_segments > max_phys_segs)
+ break;
+
+ list_add_tail(&next->queuelist, &mqrq->packed->list);
+ cur = next;
+ reqs++;
+ } while (1);
+
+ if (put_back) {
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, next);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ if (reqs > 0) {
+ list_add(&req->queuelist, &mqrq->packed->list);
+ mqrq->packed->nr_entries = ++reqs;
+ mqrq->packed->retries = reqs;
+ return reqs;
+ }
+
+no_packed:
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct request *prq;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_packed *packed = mqrq->packed;
+ bool do_rel_wr, do_data_tag;
+ u32 *packed_cmd_hdr;
+ u8 hdr_blocks;
+ u8 i = 1;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_WRITE;
+ packed->blocks = 0;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+
+ packed_cmd_hdr = packed->cmd_hdr;
+ memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+ packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+
+ /*
+ * Argument for each entry of packed group
+ */
+ list_for_each_entry(prq, &packed->list, queuelist) {
+ do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (prq->cmd_flags & REQ_META) &&
+ (rq_data_dir(prq) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+ /* Argument of CMD23 */
+ packed_cmd_hdr[(i * 2)] =
+ (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+ blk_rq_sectors(prq);
+ /* Argument of CMD18 or CMD25 */
+ packed_cmd_hdr[((i * 2)) + 1] =
+ mmc_card_blockaddr(card) ?
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ packed->blocks += blk_rq_sectors(prq);
+ i++;
+ }
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.sbc = &brq->sbc;
+ brq->mrq.stop = &brq->stop;
+
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ brq->data.blksz = 512;
+ brq->data.blocks = packed->blocks + hdr_blocks;
+ brq->data.flags |= MMC_DATA_WRITE;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+ struct mmc_blk_request *brq, struct request *req,
+ int ret)
+{
+ struct mmc_queue_req *mq_rq;
+ mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+ *
+ * If the card is not SD, we can still ok written sectors
+ * as reported by the controller (which might be less than
+ * the real number of written sectors, but never more).
+ */
+ if (mmc_card_sd(card)) {
+ u32 blocks;
+
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1) {
+ ret = blk_end_request(req, 0, blocks << 9);
+ }
+ } else {
+ if (!mmc_packed_cmd(mq_rq->cmd_type))
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+ }
+ return ret;
+}
+
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+ int idx = packed->idx_failure, i = 0;
+ int ret = 0;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ if (idx == i) {
+ /* retry from error index */
+ packed->nr_entries -= idx;
+ mq_rq->req = prq;
+ ret = 1;
+
+ if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+ list_del_init(&prq->queuelist);
+ mmc_blk_clear_packed(mq_rq);
+ }
+ return ret;
+ }
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, 0, blk_rq_bytes(prq));
+ i++;
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+ return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+ struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct request_queue *q = mq->queue;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.prev);
+ if (prq->queuelist.prev != &packed->list) {
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(mq->queue, prq);
+ spin_unlock_irq(q->queue_lock);
+ } else {
+ list_del_init(&prq->queuelist);
+ }
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue->card;
+ struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+ int ret = 1, disable_multi = 0, retry = 0, type;
+ enum mmc_blk_status status;
+ struct mmc_queue_req *mq_rq;
+ struct request *req = rqc;
+ struct mmc_async_req *areq;
+ const u8 packed_nr = 2;
+ u8 reqs = 0;
+
+ if (!rqc && !mq->mqrq_prev->req)
+ return 0;
+
+ if (rqc)
+ reqs = mmc_blk_prep_packed_list(mq, rqc);
+
+ do {
+ if (rqc) {
+ /*
+ * When 4KB native sector is enabled, only 8 blocks
+ * multiple read or write is allowed
+ */
+ if ((brq->data.blocks & 0x07) &&
+ (card->ext_csd.data_sector_size == 4096)) {
+ pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+ req->rq_disk->disk_name);
+ mq_rq = mq->mqrq_cur;
+ goto cmd_abort;
+ }
+
+ if (reqs >= packed_nr)
+ mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+ card, mq);
+ else
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ areq = &mq->mqrq_cur->mmc_active;
+ } else
+ areq = NULL;
+ areq = mmc_start_req(card->host, areq, (int *) &status);
+ if (!areq) {
+ if (status == MMC_BLK_NEW_REQUEST)
+ mq->flags |= MMC_QUEUE_NEW_REQUEST;
+ return 0;
+ }
+
+ mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ brq = &mq_rq->brq;
+ req = mq_rq->req;
+ type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ mmc_queue_bounce_post(mq_rq);
+
+ switch (status) {
+ case MMC_BLK_SUCCESS:
+ case MMC_BLK_PARTIAL:
+ /*
+ * A block was successfully transferred.
+ */
+ mmc_blk_reset_success(md, type);
+
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ ret = mmc_blk_end_packed_req(mq_rq);
+ break;
+ } else {
+ ret = blk_end_request(req, 0,
+ brq->data.bytes_xfered);
+ }
+
+ /*
+ * If the blk_end_request function returns non-zero even
+ * though all data has been transferred and no errors
+ * were returned by the host controller, it's a bug.
+ */
+ if (status == MMC_BLK_SUCCESS && ret) {
+ pr_err("%s BUG rq_tot %d d_xfer %d\n",
+ __func__, blk_rq_bytes(req),
+ brq->data.bytes_xfered);
+ rqc = NULL;
+ goto cmd_abort;
+ }
+ break;
+ case MMC_BLK_CMD_ERR:
+ ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
+ case MMC_BLK_RETRY:
+ if (retry++ < 5)
+ break;
+ /* Fall through */
+ case MMC_BLK_ABORT:
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
+ case MMC_BLK_DATA_ERR: {
+ int err;
+
+ err = mmc_blk_reset(md, card->host, type);
+ if (!err)
+ break;
+ if (err == -ENODEV ||
+ mmc_packed_cmd(mq_rq->cmd_type))
+ goto cmd_abort;
+ /* Fall through */
+ }
+ case MMC_BLK_ECC_ERR:
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ disable_multi = 1;
+ break;
+ }
+ /*
+ * After an error, we redo I/O one sector at a
+ * time, so we only reach here after trying to
+ * read a single sector.
+ */
+ ret = blk_end_request(req, -EIO,
+ brq->data.blksz);
+ if (!ret)
+ goto start_new_req;
+ break;
+ case MMC_BLK_NOMEDIUM:
+ goto cmd_abort;
+ default:
+ pr_err("%s: Unhandled return value (%d)",
+ req->rq_disk->disk_name, status);
+ goto cmd_abort;
+ }
+
+ if (ret) {
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ if (!mq_rq->packed->retries)
+ goto cmd_abort;
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ } else {
+
+ /*
+ * In case of a incomplete request
+				 * In case of an incomplete request
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card,
+ disable_multi, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ }
+ }
+ } while (ret);
+
+ return 1;
+
+ cmd_abort:
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ mmc_blk_abort_packed_req(mq_rq);
+ } else {
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
+ while (ret)
+ ret = blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
+ }
+
+ start_new_req:
+ if (rqc) {
+ if (mmc_card_removed(card)) {
+ rqc->cmd_flags |= REQ_QUIET;
+ blk_end_request_all(rqc, -EIO);
+ } else {
+ /*
+			 * If the current request is packed, it needs to be put back.
+ */
+ if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+ mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host,
+ &mq->mqrq_cur->mmc_active, NULL);
+ }
+ }
+
+ return 0;
+}
+
+static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ int ret;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue->card;
+ struct mmc_host *host = card->host;
+ unsigned long flags;
+ unsigned int cmd_flags = req ? req->cmd_flags : 0;
+
+#if 0
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host))
+ mmc_resume_bus(card->host);
+#endif
+
+ if (req && !mq->mqrq_prev->req)
+ /* claim host only for the first request */
+ mmc_claim_host(card->host);
+
+ ret = mmc_blk_part_switch(card, md);
+ if (ret) {
+ if (req) {
+ blk_end_request_all(req, -EIO);
+ }
+ ret = 0;
+ goto out;
+ }
+
+#endif
+ mmc_claim_host(card->host);
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (cmd_flags & REQ_DISCARD) {
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ if (req->cmd_flags & REQ_SECURE &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ else
+ ret = mmc_blk_issue_discard_rq(mq, req);
+ } else if (cmd_flags & REQ_FLUSH) {
+ /* complete ongoing async transfer before issuing flush */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ ret = mmc_blk_issue_flush(mq, req);
+ } else {
+ if (!req && host->areq) {
+ spin_lock_irqsave(&host->context_info.lock, flags);
+ host->context_info.is_waiting_last_req = true;
+ spin_unlock_irqrestore(&host->context_info.lock, flags);
+ }
+ ret = mmc_blk_issue_rw_rq(mq, req);
+ }
+
+//out:
+ //if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
+ // (cmd_flags & MMC_REQ_SPECIAL_MASK))
+ /*
+ * Release host when there are no more requests
+	 * and after a special request (discard, flush) is done.
+	 * In the case of a special request there is no reentry into
+	 * 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
+ */
+ mmc_release_host(card->host);
+ return ret;
+}
+
+static inline int mmc_blk_readonly(struct mmc_card *card)
+{
+ return mmc_card_readonly(card) ||
+ !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+}
+
+#if 0
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ struct device *parent,
+ sector_t size,
+ bool default_ro,
+ const char *subname,
+ int area_type)
+{
+ struct mmc_blk_data *md;
+ int devidx, ret;
+
+ devidx = find_first_zero_bit(dev_use, max_devices);
+ if (devidx >= max_devices)
+ return ERR_PTR(-ENOSPC);
+ __set_bit(devidx, dev_use);
+
+ md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ if (!md) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * !subname implies we are creating main mmc_blk_data that will be
+ * associated with mmc_card with mmc_set_drvdata. Due to device
+ * partitions, devidx will not coincide with a per-physical card
+ * index anymore so we keep track of a name index.
+ */
+ if (!subname) {
+ md->name_idx = find_first_zero_bit(name_use, max_devices);
+ __set_bit(md->name_idx, name_use);
+ } else
+ md->name_idx = ((struct mmc_blk_data *)
+ dev_to_disk(parent)->private_data)->name_idx;
+
+ md->area_type = area_type;
+
+ /*
+ * Set the read-only status based on the supported commands
+ * and the write protect switch.
+ */
+ md->read_only = mmc_blk_readonly(card);
+
+ md->disk = alloc_disk(perdev_minors);
+ if (md->disk == NULL) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ spin_lock_init(&md->lock);
+ INIT_LIST_HEAD(&md->part);
+ md->usage = 1;
+
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+ if (ret)
+ goto err_putdisk;
+
+ md->queue.issue_fn = mmc_blk_issue_rq;
+ md->queue.data = md;
+
+ md->disk->major = MMC_BLOCK_MAJOR;
+ md->disk->first_minor = devidx * perdev_minors;
+ md->disk->fops = &mmc_bdops;
+ md->disk->private_data = md;
+ md->disk->queue = md->queue.queue;
+ md->disk->driverfs_dev = parent;
+ set_disk_ro(md->disk, md->read_only || default_ro);
+ md->disk->flags = GENHD_FL_EXT_DEVT;
+ if (area_type & MMC_BLK_DATA_AREA_RPMB)
+ md->disk->flags |= GENHD_FL_NO_PART_SCAN;
+
+ /*
+ * As discussed on lkml, GENHD_FL_REMOVABLE should:
+ *
+ * - be set for removable media with permanent block devices
+ * - be unset for removable block devices with permanent media
+ *
+ * Since MMC block devices clearly fall under the second
+ * case, we do not set GENHD_FL_REMOVABLE. Userspace
+ * should use the block device creation/destruction hotplug
+ * messages to tell when the card is present.
+ */
+
+ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+ "mmcblk%d%s", md->name_idx, subname ? subname : "");
+
+ if (mmc_card_mmc(card))
+ blk_queue_logical_block_size(md->queue.queue,
+ card->ext_csd.data_sector_size);
+ else
+ blk_queue_logical_block_size(md->queue.queue, 512);
+
+ set_capacity(md->disk, size);
+
+ if (mmc_host_cmd23(card->host)) {
+ if (mmc_card_mmc(card) ||
+ (mmc_card_sd(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ md->flags |= MMC_BLK_CMD23;
+ }
+
+ if (mmc_card_mmc(card) &&
+ md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
+ blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ }
+
+ if (mmc_card_mmc(card) &&
+ (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+ (md->flags & MMC_BLK_CMD23) &&
+ card->ext_csd.packed_event_en) {
+ if (!mmc_packed_init(&md->queue, card))
+ md->flags |= MMC_BLK_PACKED_CMD;
+ }
+
+ return md;
+
+ err_putdisk:
+ put_disk(md->disk);
+ err_kfree:
+ kfree(md);
+ out:
+ return ERR_PTR(ret);
+}
+
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+ sector_t size;
+ struct mmc_blk_data *md;
+
+ if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
+ /*
+		 * The EXT_CSD sector count is in number of 512 byte
+ * sectors.
+ */
+ size = card->ext_csd.sectors;
+ } else {
+ /*
+ * The CSD capacity field is in units of read_blkbits.
+ * set_capacity takes units of 512 bytes.
+ */
+ size = card->csd.capacity << (card->csd.read_blkbits - 9);
+ }
+
+ md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+ MMC_BLK_DATA_AREA_MAIN);
+ return md;
+}
+
+static int mmc_blk_alloc_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_type,
+ sector_t size,
+ bool default_ro,
+ const char *subname,
+ int area_type)
+{
+ char cap_str[10];
+ struct mmc_blk_data *part_md;
+
+ part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+ subname, area_type);
+ if (IS_ERR(part_md))
+ return PTR_ERR(part_md);
+ part_md->part_type = part_type;
+ list_add(&part_md->part, &md->part);
+
+ string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ pr_info("%s: %s %s partition %u %s\n",
+ part_md->disk->disk_name, mmc_card_id(card),
+ mmc_card_name(card), part_md->part_type, cap_str);
+ return 0;
+}
+
+/* MMC Physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD a block device will be allocated
+ * to provide access to the partition.
+ */
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+ int idx, ret = 0;
+
+ if (!mmc_card_mmc(card))
+ return 0;
+
+ for (idx = 0; idx < card->nr_parts; idx++) {
+ if (card->part[idx].size) {
+ ret = mmc_blk_alloc_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].force_ro,
+ card->part[idx].name,
+ card->part[idx].area_type);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+ struct mmc_card *card;
+
+ if (md) {
+ card = md->queue.card;
+ if (md->disk->flags & GENHD_FL_UP) {
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable)
+ device_remove_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+
+ /* Stop new requests from getting into the queue */
+ del_gendisk(md->disk);
+ }
+
+ /* Then flush out any already in there */
+ mmc_cleanup_queue(&md->queue);
+ if (md->flags & MMC_BLK_PACKED_CMD)
+ mmc_packed_clean(&md->queue);
+ mmc_blk_put(md);
+ }
+}
+
+
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ struct list_head *pos, *q;
+ struct mmc_blk_data *part_md;
+
+ __clear_bit(md->name_idx, name_use);
+ list_for_each_safe(pos, q, &md->part) {
+ part_md = list_entry(pos, struct mmc_blk_data, part);
+ list_del(pos);
+ mmc_blk_remove_req(part_md);
+ }
+}
+
+#endif
+#if 0
+static void tSD_blk_remove_req(struct mmc_blk_data *tSD_device)
+{
+ int i;
+ if(tSD_device)
+ {
+ for(i = 0; i < partition__logic_incard; i++)
+ {
+ if( tSD_device->partitions[i].disk != NULL && tSD_device->partitions[i].disk->flags & GENHD_FL_UP){
+ /* Stop new requests from getting into the queue */
+ del_gendisk(tSD_device->partitions[i].disk);
+
+ }
+ }
+ /* Then terminate our worker thread */
+ kthread_stop(tSD_device->thread);
+ /* Then flush out any already in there */
+ tSD_cleanup_queue(&tSD_device->squeue);
+ tSD_cleanup_queue(&tSD_device->dqueue);
+ tSD_cleanup_queue(&tSD_device->uqueue);
+
+ tSD_blk_put(tSD_device);
+ }
+}
+#endif
+#if 0
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+ int ret;
+ struct mmc_card *card = md->queue.card;
+
+ add_disk(md->disk);
+ md->force_ro.show = force_ro_show;
+ md->force_ro.store = force_ro_store;
+ sysfs_attr_init(&md->force_ro.attr);
+ md->force_ro.attr.name = "force_ro";
+ md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+ if (ret)
+ goto force_ro_fail;
+
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable) {
+ umode_t mode;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
+ mode = S_IRUGO;
+ else
+ mode = S_IRUGO | S_IWUSR;
+
+ md->power_ro_lock.show = power_ro_lock_show;
+ md->power_ro_lock.store = power_ro_lock_store;
+ sysfs_attr_init(&md->power_ro_lock.attr);
+ md->power_ro_lock.attr.mode = mode;
+ md->power_ro_lock.attr.name =
+ "ro_lock_until_next_power_on";
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+ if (ret)
+ goto power_ro_lock_fail;
+ }
+ return ret;
+
+power_ro_lock_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+ del_gendisk(md->disk);
+
+ return ret;
+}
+#endif
+
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
+
+static const struct mmc_fixup blk_fixups[] =
+{
+ MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+
+ /*
+ * Some MMC cards experience performance degradation with CMD23
+ * instead of CMD12-bounded multiblock transfers. For now we'll
+ * black list what's bad...
+ * - Certain Toshiba cards.
+ *
+ * N.B. This doesn't affect SD cards.
+ */
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+	 * Some Micron MMC cards need longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
+ END_FIXUP
+};
+
+static void mmc_flush(void)
+{
+
+}
+
+int init_card_proc(void);
+
+
+
+static int mmc_blk_probe(struct mmc_card *card)
+{
+ int i;
+ int ret;
+ unsigned int size;
+ unsigned int mbr_num;
+
+ if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
+ /*
+		 * The EXT_CSD sector count is in number of 512 byte
+ * sectors.
+ */
+ size = card->ext_csd.sectors;
+ } else {
+ /*
+ * The CSD capacity field is in units of read_blkbits.
+ * set_capacity takes units of 512 bytes.
+ */
+ size = card->csd.capacity << (card->csd.read_blkbits - 9);
+ }
+
+ UPGRADE_DBG_INF("card->csd.read_blkbits = %u, card total size = %u sectors\n", card->csd.read_blkbits, size);
+ card_total_size = size;
+ tSD_card = card;
+
+ mmc_fixup_device(card, blk_fixups);
+
+ size = tSD_get_logic_cat();
+ printk("logic_size %u: phy_size %u\n", size,card_total_size);
+
+ ret = calculate_part_num();
+	if(ret < 0){
+ printk("%s,%d,get partition error!\n", __FUNCTION__, __LINE__);
+ return 0;
+ }else{
+ printk("%s,%d, partition__logic_incard:%d\n", __FUNCTION__, __LINE__, ret);
+ }
+
+ ret = get_afi_configuration();
+ if(ret){
+ printk("%s err!\n", __FUNCTION__);
+ }
+
+ if(is_for_upgrade)
+ {
+ AdfuUpdateMbrFromPhyToUsr = UpdateMbrFromPhyToUsr;
+ adfu_flush_nand_cache = mmc_flush;
+ adfus_nand_read = tSD_adfu_read;
+ adfus_nand_write = tSD_adfu_write;
+ }
+
+ ret = tSD_queue_init(&tSD_device_md);
+ if(ret){
+ UPGRADE_DBG_INF("%s err !\n",__FUNCTION__);
+ }
+
+ ret = init_tSD_part_myself();
+
+ for(i = 0; i < partition__logic_incard; i++)
+ {
+ ret = tSD_partition_init(&tSD_device_md, i);
+ if(ret){
+ printk("%s err!,need to check\n", __FUNCTION__);
+ break;
+ }
+ }
+
+
+ if(is_force_format == 1)
+ {
+ struct uparam adfu_uparam;
+ char *buf=NULL;
+ buf = kmalloc(FORMATBYTE, GFP_KERNEL);
+ if( NULL == buf){
+ printk("Err:%s:malloc buf fail\n",__FUNCTION__);
+ return -1;
+ }
+ memset(buf, 0xff, FORMATBYTE);
+		//erase the 4K boot phy partition head, 4 mbrc copies in total
+ for(mbr_num = 0; mbr_num < MBRC_NUM; mbr_num++){
+ __do_adfu_write(mbr_num*BOOT_PHY_SIZE/PSECTBYTE, \
+ FORMATBYTE/PSECTBYTE, buf);
+ }
+		//erase 4K for each partition head
+ for (i = 0; i < partition__logic_incard; i++){
+ adfu_uparam.flash_partition = i;
+ if(tSD_part[i].partsize > 0){
+
+ tSD_adfu_write(0, FORMATBYTE/PSECTBYTE, buf, &adfu_uparam);
+ }
+ }
+ kfree(buf);
+ }
+
+ NAND_InitMiscInfo();
+
+ return 0;
+}
+
+
+#if 0
+static void mmc_blk_remove(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ mmc_blk_remove_parts(card, md);
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md);
+ mmc_release_host(card->host);
+ mmc_blk_remove_req(md);
+ mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
+}
+#endif
+
+static void tSD_blk_remove(struct mmc_card *card)
+{
+ return ;
+// tSD_blk_remove_req(&tSD_device_md);
+}
+
+#if 0
+#ifdef CONFIG_PM
+static int mmc_blk_suspend(struct mmc_card *card)
+{
+ struct mmc_blk_data *part_md;
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ mmc_queue_suspend(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_suspend(&part_md->queue);
+ }
+ }
+ return 0;
+}
+
+static int mmc_blk_resume(struct mmc_card *card)
+{
+ struct mmc_blk_data *part_md;
+ struct mmc_blk_data *md = mmc_get_drvdata(card);
+
+ if (md) {
+ /*
+ * Resume involves the card going into idle state,
+ * so current partition is always the main one.
+ */
+ md->part_curr = md->part_type;
+ mmc_queue_resume(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_resume(&part_md->queue);
+ }
+ }
+ return 0;
+}
+#else
+#define mmc_blk_suspend NULL
+#define mmc_blk_resume NULL
+#endif
+
+#endif
+
+static struct mmc_driver mmc_driver = {
+ .drv = {
+ .name = "tsd_card",
+ },
+ .probe = mmc_blk_probe,
+ .remove = tSD_blk_remove,
+// .remove = mmc_blk_remove,
+// .suspend = mmc_blk_suspend,
+// .resume = mmc_blk_resume,
+	//tsd card doesn't need to suspend
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+
+static int __init mmc_blk_init(void)
+{
+ int res;
+ unsigned boot_dev=0;
+
+ if(owl_set_carddev_match_name()!=0){
+ printk("err:%s:tsd set name err\n",__FUNCTION__);
+ res = -1;
+ goto out;
+ }
+ if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
+ pr_info("mmcblk: using %d minors per device\n", perdev_minors);
+
+ max_devices = 256 / perdev_minors;
+
+ if(!card_to_card){
+ res = init_card_proc();
+ if(res){
+ printk("%s:init_card_proc err\n",__FUNCTION__);
+ goto out;
+ }
+ }
+
+ if(card_to_card){
+ mmc_driver.drv.name = "card_to_card";
+ tsd_major = 94;
+ blkdev_name = "card_to_card";
+ }
+
+ res = register_blkdev(tsd_major,blkdev_name);
+ if (res)
+ goto out;
+
+ res = mmc_register_driver(&mmc_driver);
+ if (res)
+ goto out2;
+
+	//TODO: register hdcp key get function here.
+ boot_dev = owl_get_boot_dev();
+ printk("%s: bootdev 0x%x\n", __FUNCTION__, boot_dev);
+	//only for the SD->NAND boot device, owl_register_secure_storage is not used
+ if(boot_dev != OWL_BOOTDEV_SD02NAND ) {
+ if(0 !=owl_register_secure_storage(&card_secure))
+ printk("%s:owl_register_secure_storage fail\n ",__FUNCTION__);
+ }
+
+ return 0;
+ out2:
+ unregister_blkdev(tsd_major, blkdev_name);
+ out:
+ return res;
+}
+
+static void __exit mmc_blk_exit(void)
+{
+ owl_unregister_secure_storage(&card_secure);
+	/*
+	 * free the partition info buffers allocated in calculate_part_num()
+	 */
+ if(capinfo != NULL)
+ kfree(capinfo);
+ if(tSD_part != NULL)
+ kfree(tSD_part);
+ if(tSD_device_md.partitions != NULL)
+ kfree(tSD_device_md.partitions);
+
+ mmc_unregister_driver(&mmc_driver);
+ unregister_blkdev(tsd_major,blkdev_name);
+}
+
+
+/*
+ * helper functions for tSD
+ */
+
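+/*
+ * Split a transfer into chunks of at most 256 sectors (128KB) and issue
+ * each chunk through tSD_data_transfer() until the whole range is done or
+ * an error is returned.
+ */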
+int tSD_pre_data_transfer(unsigned start, unsigned nsector, void *buf, unsigned blksz, int write)
+{
+ int cur_sects;
+ int j, i;
+ int ret;
+
+	UPGRADE_DBG_INF("start = %d, nsector = %d, buf = 0x%x, "
+		"blksz = %d, write = %d\n", start, nsector, (unsigned int)buf, blksz, write);
+ cur_sects = nsector;
+ ret = 0;
+ i = 0;
+ j = nsector/256;
+ // single data transfer 128K
+ if(j > 0){
+ cur_sects = 256;
+ }
+
+ do
+ {
+ if(0 == cur_sects){
+ break;
+ }
+ UPGRADE_DBG_INF("start = %d, nsector = %d, buf = 0x%x, blksz = %d, write = %d\n",\
+ start, cur_sects, (unsigned int)buf, blksz, write);
+ ret = tSD_data_transfer(tSD_card, buf, start, cur_sects, blksz, write);
+ if(ret){
+ break;
+ }
+ i++;
+ start += 256;
+ buf += (256*512);
+
+ if(j > i){
+ cur_sects = 256;
+ }else{
+ cur_sects = nsector - (i*256);
+ }
+ }while(i <= j);
+
+ return ret;
+}
+
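+/*
+ * Claim the host and perform a raw sector read/write on the tSD card.
+ * These two helpers are the entry points used by the boot ioctl handlers
+ * and by the self-test/force-format code.
+ */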
+int __do_adfu_read(unsigned start, unsigned nsector, void *buf)
+{
+ int ret = 0;
+// UPGRADE_DBG_INF("start = %d, nsector = %d, buf = 0x%x\n", start, nsector, (unsigned int)buf);
+ if(NULL == tSD_card){
+ printk("Err: %s : tSD_card is NULL,please check\n",__FUNCTION__);
+ return -1;
+ }
+ mmc_claim_host(tSD_card->host);
+ ret = tSD_pre_data_transfer(start, nsector, buf, TSD_BLOCKSZ, TSD_READ);
+ mmc_release_host(tSD_card->host);
+ return ret;
+}
+int __do_adfu_write(unsigned start, unsigned nsector, void * buf)
+{
+ int ret = 0;
+// UPGRADE_DBG_INF("start = %d, nsector = %d, buf = 0x%x\n", start, nsector, (unsigned int)buf);
+ if(NULL == tSD_card){
+ printk("Err: %s : tSD_card is NULL,please check\n",__FUNCTION__);
+ return -1;
+ }
+ mmc_claim_host(tSD_card->host);
+ ret =tSD_pre_data_transfer(start, nsector, buf, TSD_BLOCKSZ, TSD_WRITE);
+ mmc_release_host(tSD_card->host);
+ return ret;
+}
+
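+/*
+ * Issue one synchronous data transfer: build an mmc_request with a single
+ * scatterlist entry, wait for it to complete, wait for the card to leave
+ * the programming state and then check the result.
+ */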
+int tSD_data_transfer(struct mmc_card * card, unsigned char * buf, unsigned start, unsigned blocks, unsigned blksz, int write)
+{
+ struct mmc_request mrq = {0};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg ;
+
+// UPGRADE_DBG_INF("start = %d, blocks = %d, buf = 0x%x, blksz = %d ,write = %d\n", start, blocks, (unsigned int)buf, blksz, write);
+
+ data.sg = &sg;
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ tSD_prepare_mrq(card, buf, &mrq, start, blocks, blksz, write);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ tSD_wait_busy(card);
+
+ return tSD_test_check_result(card, &mrq);
+}
+
+/*
+ * Fill in the mmc_request structure given a set of transfer parameters.
+ */
+static void tSD_prepare_mrq(struct mmc_card *card, unsigned char *buf,
+ struct mmc_request *mrq, unsigned start, unsigned blocks, unsigned blksz, int write)
+{
+
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
+
+ if (blocks > 1) {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
+ } else {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ }
+
+ mrq->cmd->arg = start;
+ if (!mmc_card_blockaddr(card))
+ mrq->cmd->arg <<= 9;
+
+ mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ if (blocks == 1)
+ mrq->stop = NULL;
+ else {
+ mrq->stop->opcode = MMC_STOP_TRANSMISSION;
+ mrq->stop->arg = 0;
+ mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ }
+
+ mrq->data->blksz = blksz;
+ mrq->data->blocks = blocks;
+ mrq->data->sg_len = 1;
+
+ sg_init_one(mrq->data->sg,buf, blksz*blocks);
+
+ mrq->data->flags = write ?
+ MMC_DATA_WRITE : MMC_DATA_READ;
+ //printk("%s,%d,buf = 0x%x, mrq->data->sg_len = 0x%x\n",__FUNCTION__,__LINE__,(unsigned int)buf,mrq->data->sg_len);
+ mmc_set_data_timeout(mrq->data, card);
+}
+
+
+
+
+/*
+ * Wait for the card to finish the busy state
+ */
+static int tSD_wait_busy(struct mmc_card *card)
+{
+ int ret, busy;
+ struct mmc_command cmd = {0};
+
+ busy = 0;
+ do {
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ ret = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (ret)
+ break;
+
+ if (!busy && tSD_test_busy(&cmd)) {
+ busy = 1;
+ if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ pr_info("%s: Warning: Host did not "
+ "wait for busy state to end.\n",
+ mmc_hostname(card->host));
+ }
+ } while (tSD_test_busy(&cmd));
+
+ return ret;
+}
+static int tSD_test_busy(struct mmc_command *cmd)
+{
+ return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
+}
+static int tSD_test_check_result(struct mmc_card *card, struct mmc_request *mrq)
+{
+ int ret;
+
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+ ret = 0;
+
+ if (!ret && mrq->cmd->error)
+ ret = mrq->cmd->error;
+ if (!ret && mrq->data->error)
+ ret = mrq->data->error;
+ if (!ret && mrq->stop && mrq->stop->error)
+ ret = mrq->stop->error;
+ if (!ret && mrq->data->bytes_xfered !=
+ mrq->data->blocks * mrq->data->blksz)
+ ret = 1;
+
+ if (ret == -EINVAL)
+ ret = 2;
+
+ return ret;
+}
+
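+/*
+ * Simple single-block self test: write a 0xaa pattern one block at a time,
+ * read everything back and compare. Debug helper only.
+ */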
+int tSD_single_blk_test(unsigned start, unsigned blocks )
+{
+ char *r_buf, *w_buf;
+ char *pc_char, *pc_temp;
+ int i, j, ret;
+
+ UPGRADE_DBG_INF("start = %d, blocks = %d\n",start, blocks);
+
+ w_buf = (char *)kmalloc(TSD_BLOCKSZ, GFP_DMA);
+ r_buf = (char *)kmalloc(TSD_BLOCKSZ * blocks, GFP_DMA);
+
+ if(r_buf == NULL || w_buf == NULL)
+ {
+ printk("%s,malloc buf error!\n",__FUNCTION__);
+ return -1;
+ }
+ else
+ {
+ printk("malloc r_buf at [0x%x]\n",(unsigned int) r_buf);
+ printk("malloc w_buf at[0x%x]\n",(unsigned int)w_buf);
+ }
+ memset(w_buf,0,TSD_BLOCKSZ);
+ memset(r_buf,0,TSD_BLOCKSZ * blocks);
+ pc_char = w_buf;
+ for(i = 0; i < TSD_BLOCKSZ; i++)
+ {
+ *pc_char = 0xaa;
+ pc_char++;
+ }
+ pc_char = w_buf;
+ printk("%s,%d,data in w_buf\n",__FUNCTION__,__LINE__);
+ for(i = 0; i < TSD_BLOCKSZ/16; i++)
+ {
+ printk("w_buf %d :" ,i);
+ for(j=0; j < 16; j++)
+ {
+ printk(" 0x%x ",*pc_char);
+ pc_char++;
+ }
+ printk("\n");
+ }
+
+	//write single blocks to the tSD card
+ for(i = 0; i < blocks; i++)
+ {
+ ret = __do_adfu_write(start + i, 1, w_buf);
+ if(ret)
+ {
+ printk("%s,%d,write data err!i = %d, ret = %d\n",__FUNCTION__,__LINE__, i, ret);
+ return ret;
+ }
+ }
+
+	//read single blocks back from the tSD card
+ for(i = 0; i < blocks; i++)
+ {
+ ret = __do_adfu_read(start + i, 1, r_buf + i*TSD_BLOCKSZ);
+ if(ret)
+ {
+			printk("%s,%d,read data err! i = %d, ret = %d\n",__FUNCTION__,__LINE__, i, ret);
+ return ret;
+ }
+ }
+ printk("malloc r_buf at [0x%x]\n",(unsigned int) r_buf);
+ pc_char = r_buf;
+ printk("%s,%d,data in r_buf\n",__FUNCTION__,__LINE__);
+ for(i = 0; i < TSD_BLOCKSZ/16; i++)
+ {
+ printk("r_buf %d :" ,i);
+ for(j=0; j < 16; j++)
+ {
+ printk(" 0x%x ",*pc_char);
+ pc_char++;
+ }
+ printk("\n");
+ }
+ //compare data :read and write
+ for(i = 0; i < blocks; i++)
+ {
+ pc_char = r_buf + i*TSD_BLOCKSZ;
+ pc_temp = w_buf;
+ for(j = 0; j < TSD_BLOCKSZ; j++)
+ {
+ if(*pc_char != *pc_temp)
+ {
+ printk("%s,%d,compare data err! i = %d, j = %d\n",__FUNCTION__,__LINE__,i,j);
+ return -1;
+ }
+ pc_char++;
+ pc_temp++;
+ }
+ }
+
+ printk("%s test OK!\n",__FUNCTION__);
+ kfree(r_buf);
+ kfree(w_buf);
+ return 0;
+
+}
+//static int tSD_multiple_blk_test(unsigned start, unsigned blocks, unsigned times)
+//{
+// char *r_buf, *w_buf;
+// char *pc_char, *pc_temp;
+// int i, j, ret;
+//
+// UPGRADE_DBG_INF("start = %d, blocks = %d, times = %d\n",start, blocks, times);
+//
+// w_buf = (char *)kmalloc(TSD_BLOCKSZ * blocks, GFP_DMA);
+// r_buf = (char *)kmalloc(times*(TSD_BLOCKSZ * blocks), GFP_DMA);
+//
+// if( r_buf == NULL || w_buf == NULL)
+// {
+// printk("%s,malloc buf error!\n",__FUNCTION__);
+// return -1;
+// }
+// else
+// {
+// printk("malloc r_buf at [0x%x]\n",(unsigned int) r_buf);
+// printk("malloc w_buf at[0x%x]\n",(unsigned int)w_buf);
+// }
+// memset(w_buf,0,TSD_BLOCKSZ * blocks);
+// memset(r_buf,0,times*(TSD_BLOCKSZ * blocks));
+// pc_char = w_buf;
+// for(i = 0; i < TSD_BLOCKSZ * blocks; i++)
+// {
+// *pc_char = 0x5a;
+// pc_char++;
+// }
+// //write signle data to tSD card
+// for(i = 0; i < times; i++)
+// {
+// ret = __do_adfu_write(start+ i*blocks, blocks, w_buf);
+// if(ret)
+// {
+// printk("%s,%d,write data err!i = %d, ret = %d\n",__FUNCTION__,__LINE__, i, ret);
+// return ret;
+// }
+// }
+// //read signle data from tSD card
+// for(i = 0; i < times; i++)
+// {
+// ret = __do_adfu_read(start + i*blocks, blocks, r_buf + i*(TSD_BLOCKSZ*blocks));
+// if(ret)
+// {
+// printk("%s,%d,write data err!i = %d, ret = %d\n",__FUNCTION__,__LINE__, i, ret);
+// return ret;
+// }
+// }
+// //compare data :read and write
+// for(i = 0; i < times; i++)
+// {
+// pc_char = r_buf + i*(TSD_BLOCKSZ * blocks);
+// pc_temp = w_buf;
+// for(j = 0; j < TSD_BLOCKSZ * blocks; j++)
+// {
+// if(*pc_char != *pc_temp)
+// {
+// printk("%s,%d,compare data err! i = %d, j = %d\n",__FUNCTION__,__LINE__,i,j);
+// return -1;
+// }
+// pc_char++;
+// pc_temp++;
+// }
+// }
+//
+// printk("%s test OK!\n",__FUNCTION__);
+// kfree(r_buf);
+// kfree(w_buf);
+// return 0;
+//}
+//static void tSD_transfer_interface_test(void)
+//{
+// int ret;
+// unsigned start;
+// unsigned blocks;
+// unsigned times;
+//
+// start = 100;
+// blocks =10;
+// times = 1;
+//
+// printk("--------------------\n");
+// printk("tSD_single_blk_test\n");
+// ret = tSD_single_blk_test(100,1);
+// printk("--------------------\n");
+// printk("tSD_multiple_blk_test\n");
+// //ret = tSD_multiple_blk_test(500, 488, 1);
+//}
+
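+/*
+ * Set up the block-layer plumbing for the tSD device: reserve a device and
+ * name index, initialise the system, data and udisk request queues, and
+ * start the worker thread that services them.
+ */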
+static int tSD_queue_init(struct mmc_blk_data *tSD_device)
+{
+ int ret;
+ int devidx;
+
+ UPGRADE_DBG_INF("\n");
+
+ if (tSD_device == NULL){
+ ret = -1;
+ return ret;
+ }
+
+ tSD_device->area_type = MMC_BLK_DATA_AREA_MAIN;
+ tSD_device->usage = 1;
+ mmc_set_drvdata(tSD_card, tSD_device);
+
+ devidx = find_first_zero_bit(dev_use, max_devices);
+ if (devidx >= max_devices)
+ return -28;
+ __set_bit(devidx, dev_use);
+
+ tSD_device->name_idx = find_first_zero_bit(name_use, max_devices);
+ __set_bit(tSD_device->name_idx, name_use);
+
+ //init system queue
+ spin_lock_init(&tSD_device->slock);
+ ret = tSD_init_queue(&tSD_device->squeue, tSD_card, &tSD_device->slock, NULL);
+ tSD_device->squeue.issue_fn = mmc_blk_issue_rq;
+ tSD_device->squeue.data = tSD_device;
+ //init data queue
+ spin_lock_init(&tSD_device->dlock);
+ ret = tSD_init_queue(&tSD_device->dqueue, tSD_card, &tSD_device->dlock, NULL);
+ tSD_device->dqueue.issue_fn = mmc_blk_issue_rq;
+ tSD_device->dqueue.data = tSD_device;
+ //init udisk queue
+ spin_lock_init(&tSD_device->ulock);
+ ret = tSD_init_queue(&tSD_device->uqueue, tSD_card, &tSD_device->ulock, NULL);
+ tSD_device->uqueue.issue_fn = mmc_blk_issue_rq;
+ tSD_device->uqueue.data = tSD_device;
+
+
+
+ //init kthread
+ sema_init(&tSD_device->thread_sem, 1);
+ if(card_to_card){
+ tSD_device->thread = kthread_run(tSD_queue_thread, tSD_device, "card_card_thread");
+ }else{
+ tSD_device->thread = kthread_run(tSD_queue_thread, tSD_device, "tsd_thread");
+ }
+
+ if (IS_ERR(tSD_device->thread)) {
+ ret = PTR_ERR(tSD_device->thread);
+ printk("%s, alloc kthread err!please check!\n",__FUNCTION__);
+ }
+
+ return 0;
+}
+
+//static int tSD_partition_init(struct mmc_blk_data *tSD_device, int part_num)
+//{
+// UPGRADE_DBG_INF("\n");
+// device->partitions[part_num].md = tSD_blk_alloc_md(device->card,
+// device,
+// &device->card->dev,
+// &device->mq,
+// part_num,
+// device->partitions[part_num].size,
+// device->major,
+// device->minorbits,
+// false,
+// NULL,
+// MMC_BLK_DATA_AREA_MAIN);
+//
+//// mmc_set_drvdata(device->card, device->partitions[part_num].md);
+//// mmc_fixup_device(device->card, blk_fixups);
+//
+// UPGRADE_DBG_INF("\n");
+//
+//// if (mmc_add_disk(device->partitions[part_num].md))
+//// goto out;
+//
+// return 0;
+//
+// out:
+// mmc_blk_remove_req(device->partitions[part_num].md);
+// return 0;
+//
+//
+//}
+
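+/*
+ * Create the gendisk for one logical partition: copy offset/size from the
+ * partition table, attach the disk to the matching request queue, apply
+ * the read-only/no-access attributes and register it with add_disk().
+ */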
+static int tSD_partition_init(struct mmc_blk_data *tSD_device, int part_num)
+{
+
+ tSD_device->partitions[part_num].num = part_num;
+ if(tSD_part[part_num].type != PART_DUMMY){
+ tSD_device->partitions[part_num].offset = tSD_part[part_num].off_size;
+ tSD_device->partitions[part_num].size = tSD_part[part_num].partsize;
+ tSD_device->partitions[part_num].attr = 0;
+
+ printk("tSD_device->partitions[%d] .offset=%lu, size=%lu\n", part_num
+ , tSD_device->partitions[part_num].offset
+ , tSD_device->partitions[part_num].size);
+
+ if(tSD_device->partitions[part_num].size == 0){
+			printk("%s() %d the disk(%d) has 0 size, so no disk is allocated.\n", __FUNCTION__, __LINE__, part_num);
+ return 0;
+ }
+
+ tSD_device->partitions[part_num].disk = alloc_disk(1 << tSD_device->minorbits);
+ if (!tSD_device->partitions[part_num].disk){
+ printk("%s() %d alloc_disk (%d) failed.\n", __FUNCTION__, __LINE__, part_num);
+ goto _out;
+ }
+
+ tSD_device->partitions[part_num].disk->major = tsd_major;
+ tSD_device->partitions[part_num].disk->first_minor = part_num << tSD_device->minorbits;
+ tSD_device->partitions[part_num].disk->fops = &mmc_bdops;
+ if(tsd_major == 93){
+ snprintf (tSD_device->partitions[part_num].disk->disk_name, 32, "act%c", 'a'+part_num);
+ }else{
+ snprintf (tSD_device->partitions[part_num].disk->disk_name, 32, "burn%c", 'a'+part_num);
+ }
+ tSD_device->partitions[part_num].disk->private_data = tSD_device;
+ set_capacity(tSD_device->partitions[part_num].disk, tSD_device->partitions[part_num].size);
+
+ if (part_num == unassign_partnum){
+ tSD_device->partitions[part_num].disk->queue = tSD_device->uqueue.queue;
+ blk_queue_logical_block_size(tSD_device->uqueue.queue, 512);
+ }else if (part_num == ANDROID_DATA_ACCESS){
+ tSD_device->partitions[part_num].disk->queue = tSD_device->dqueue.queue;
+ blk_queue_logical_block_size(tSD_device->dqueue.queue, 512);
+ }else{
+ tSD_device->partitions[part_num].disk->queue = tSD_device->squeue.queue;
+ blk_queue_logical_block_size(tSD_device->squeue.queue, 512);
+			/* remove warning: WARN_ON_ONCE(q->bypass_depth < 0) */
+ tSD_device->squeue.queue->bypass_depth = 1 ;
+ }
+
+ if (tSD_part[part_num].type == PART_NO_ACCESS){
+ tSD_device->partitions[part_num].attr |= NAND_PART_OP_NA;
+ }else if (tSD_part[part_num].type == PART_READONLY){
+ tSD_device->partitions[part_num].attr |= NAND_PART_OP_RO;
+ }else if (tSD_part[part_num].type == PART_WRITEONLY){
+ tSD_device->partitions[part_num].attr |= NAND_PART_OP_WO;
+		}
+		if (tSD_device->partitions[part_num].attr & NAND_PART_OP_RO){
+ set_disk_ro(tSD_device->partitions[part_num].disk, 1);
+ }
+
+ add_disk(tSD_device->partitions[part_num].disk);
+ printk("add_disk act%c success!\n", 'a'+part_num);
+ }
+
+ return 0;
+ _out:
+ return -1;
+}
+
+//static struct mmc_blk_data *tSD_blk_alloc_md(struct mmc_card *card,
+// struct mmc_blk_data *tSD_device,
+// struct device *parent,
+// struct mmc_queue *mq,
+// int part_num,
+// sector_t partition_size,
+// int major,
+// int minorbits,
+// bool default_ro,
+// const char *subname,
+// int area_type)
+//{
+//
+// struct mmc_blk_data *md;
+// int ret;
+//
+// md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+// if (!md) {
+// ret = -ENOMEM;
+// goto out;
+// }
+//
+// md->area_type = area_type;
+//
+// /*
+// * Set the read-only status based on the supported commands
+// * and the write protect switch.
+// */
+// md->read_only = mmc_blk_readonly(card);
+//
+// md->disk = alloc_disk(1 << minorbits);
+// if (md->disk == NULL) {
+// ret = -ENOMEM;
+// goto err_kfree;
+// }
+//
+// md->usage = 1;
+//
+// md->disk->major = major;
+// md->disk->first_minor = 1 << minorbits;
+// md->disk->fops = &mmc_bdops;
+// md->disk->private_data = tSD_device;
+// md->disk->queue = mq->queue;
+//
+// snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+// "act%c", 'w'+part_num);
+//
+//
+// blk_queue_logical_block_size(mq->queue, 512);
+// set_capacity(md->disk, partition_size);
+//
+// UPGRADE_DBG_INF("\n");
+//
+// mmc_add_disk(md);
+//
+// return md;
+//
+// //err_putdisk:
+// put_disk(md->disk);
+// err_kfree:
+// kfree(md);
+// out:
+// return ERR_PTR(ret);
+//}
+
+
+static int tSD_blk_ioctl(struct block_device * bdev, fmode_t mode, unsigned int
+ cmd, unsigned long arg)
+{
+ int ret = 0;
+
+ switch(cmd)
+ {
+ case BOOT_ERASE:
+ case BOOT_READ:
+ case BOOT_WRITE:
+ {
+ ret = boot_operation(arg, cmd);
+ return ret;
+ }
+ case BOOT_GETINFO:
+ {
+ ret = get_boot_media_info(arg);
+ return ret;
+ }
+ case ACCESS_MISC_INFO:
+ {
+ ret = handle_misc_info(arg);
+ return ret;
+ }
+ default:
+ break;
+ }
+
+ // printk("%s() %d: nand drv miss cmd %x\n", __FUNCTION__, __LINE__, cmd);
+ return -ENOTTY; /* unknown command */
+}
+
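+/*
+ * ACCESS_MISC_INFO ioctl backend: copy the MiscInfo_t descriptor from user
+ * space, then read (dir == 0) or write (dir == 1) the misc info area via
+ * NAND_GetMiscInfo()/NAND_WriteMiscInfo(), bouncing the data through a
+ * kernel buffer of at most 8KB.
+ */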
+int handle_misc_info(unsigned int arg)
+{
+ int ret = 0;
+ char *buf;
+ MiscInfo_t p;
+ ret = copy_from_user(&p, (void*)arg, sizeof(MiscInfo_t));
+ if(ret)
+ {
+ printk("%s err!%d\n", __FUNCTION__, __LINE__);
+ }
+
+ //PRINT("handle misc info %d %x %x %x\n", p.dir, p.type, p.buf, p.size);
+ printk("[tsd/emmc]handle misc info\n");
+
+ if(p.size < 8192)
+ {
+ buf = MALLOC(p.size);
+ if(buf == NULL)
+ {
+			printk("%s malloc err, %d!\n", __FUNCTION__, p.size);
+ return -1;
+ }
+ }
+ else
+ {
+ printk("%s ERR\n", __FUNCTION__);
+ return 0;
+ }
+
+ if(p.dir == 0) //read
+ {
+ ret = NAND_GetMiscInfo(p.type, buf, p.size);
+ if(copy_to_user((void *)p.buf, buf, p.size))
+ {
+ printk("%s err!%d\n", __FUNCTION__, __LINE__);
+			FREE(buf);
+			return -1;
+ }
+ }
+ else if(p.dir == 1)
+ {
+ if(copy_from_user(buf, p.buf, p.size))
+ {
+ printk("%s err!%d\n", __FUNCTION__, __LINE__);
+			FREE(buf);
+			return -1;
+ }
+ ret = NAND_WriteMiscInfo(p.type, buf, p.size);
+ }
+
+ if(buf)
+ {
+ FREE(buf);
+ buf = NULL;
+ }
+ printk("[tsd/emmc] %s done\n", __FUNCTION__);
+ return ret;
+}
+
+
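+/*
+ * BOOT_GETINFO ioctl backend: fill a boot_medium_info_t with the card type
+ * and page/sector geometry and copy it back to user space.
+ */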
+int get_boot_media_info(unsigned int arg)
+{
+ int ret = 0;
+ unsigned long i = 0;
+ boot_medium_info_t bmi;
+
+ memset(&bmi,0,sizeof(boot_medium_info_t));
+ printk("%s %d bmi.medium_type:%d\n", __FUNCTION__, __LINE__, FLASH_TYPE_CARD);
+ bmi.medium_type = FLASH_TYPE_CARD;
+// if (NAND_IS_SMALL_BLOCK)
+// {
+// bmi.medium_type = 0;
+// }
+// else
+// {
+// bmi.medium_type = 1;
+// }
+
+ bmi.pageAddrPerBlock = PAGE_PER_BLK;
+ bmi.pagePerBlock = PAGE_PER_BLK;
+ bmi.sec_per_page = SEC_PER_PAGE;
+ bmi.sec_per_boot_page = SEC_PER_BOOT_PAGE;
+// bmi.ecc_bits = NandDevInfo.NandFlashInfo->BromECCUnitParam->ECCBitsPerECCUnit;
+// bmi.ud_bytes = NandDevInfo.NandFlashInfo->BromECCUnitParam->UserDataBytesPerECCUnit;
+// bmi.ecc_bits = 0;
+// bmi.ud_bytes = 0;
+// bmi.readretry = 0;
+// MEMSET(bmi.lsb_tbl, 0, 128);
+// if (NAND_NEED_READ_RETRY!=0)
+// {
+// bmi.readretry = 1;
+// MEMCPY(bmi.lsb_tbl, &(NandStorageInfo.RrStorageInfo.SmodeMap),128);
+// }
+
+// MEMCPY(bmi.chipid, &(NandChipInfo), 64);
+ bmi.data_blk_per_zone = DATA_BLK_NUM_PER_ZONE;
+ i = copy_to_user((void *)arg, &bmi, sizeof(boot_medium_info_t));
+ if(i){
+ printk("%s err!\n", __FUNCTION__);
+ }
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+ return ret;
+}
+
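+/*
+ * BOOT_READ/BOOT_WRITE/BOOT_ERASE ioctl backend: copy the boot_op_t from
+ * user space, bounce one page of data through a kernel buffer where needed
+ * and hand the operation to boot_phy_op_entry().
+ */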
+int boot_operation(unsigned int arg, unsigned int cmd)
+{
+ int ret = 0;
+ unsigned long i;
+ boot_op_t bootop;
+ unsigned char * buffer = NULL;
+ unsigned char * usr_buff = NULL;
+ unsigned int page_size; //in bytes
+
+// down(&tSD_device_md.mutex);
+// printk("%s %d\n", __FUNCTION__, __LINE__);
+
+ //get param from user space.
+ i = copy_from_user(&bootop, (void*)arg, sizeof(boot_op_t));
+ if(i){
+ printk("%s err!%d\n", __FUNCTION__, __LINE__);
+ }
+
+ if((cmd == BOOT_READ) || (cmd == BOOT_WRITE))
+ {
+ page_size = SEC_PER_PAGE * SEC_SIZE;
+ buffer = kmalloc(page_size, GFP_KERNEL);
+ if (buffer == NULL)
+ {
+ ret = -1;
+ goto _out;
+ }
+ usr_buff = bootop.buffer;
+ //use kernel buffer to read data,
+ bootop.buffer = buffer;
+ }
+
+ switch(cmd)
+ {
+ case BOOT_READ:
+ ret = boot_phy_op_entry(&bootop, BOOT_READ);
+ i = copy_to_user(usr_buff, buffer, page_size);
+ break;
+ case BOOT_WRITE:
+ i = copy_from_user(buffer, usr_buff, page_size);
+ ret = boot_phy_op_entry(&bootop, BOOT_WRITE);
+ break;
+ case BOOT_ERASE:
+ bootop.page = 0;
+ bootop.buffer = 0;
+ ret = boot_phy_op_entry(&bootop, BOOT_ERASE);
+ break;
+ default:
+ printk("%s() %d boot operation cmd error!!!\n", __FUNCTION__, __LINE__);
+ break;
+ }
+
+ if(i){
+ printk("%s err!%d\n", __FUNCTION__, __LINE__);
+ }
+ if((cmd == BOOT_READ) || (cmd == BOOT_WRITE))
+ {
+ kfree(buffer);
+ }
+_out:
+// up(&tSD_device_md.mutex);
+// printk("%s %d\n", __FUNCTION__, __LINE__);
+ return ret;
+}
+
+#define DUMP_BUFFER(buf, start, size) {\
+ unsigned char *ptr;\
+ int i;\
+	ptr = (unsigned char *)buf + start;\
+ printk("%d: \n", start);\
+ for(i = 0; i < size; i++)\
+ {\
+ if(i % 16 == 0)\
+ printk("%d: ", start+i);\
+ printk("%.2x ", *ptr++);\
+ if(i % 16 == 15)\
+ printk("\n");\
+ }\
+ printk("\n");\
+ }
+
+int boot_phy_op_entry(boot_op_t * op, unsigned int cmd)
+{
+ int ret = 0;
+ unsigned start;
+ unsigned nsector;
+ unsigned phyblk_num;
+ unsigned page_in_blk;
+ unsigned char *buffer;
+
+ phyblk_num = op->blk;
+ page_in_blk = op->page;
+ buffer = op->buffer;
+
+ start =convert_to_sector(phyblk_num, page_in_blk);
+ nsector = SEC_PER_PAGE;
+
+ switch(cmd)
+ {
+ case BOOT_READ:
+// printk("BOOT_READ block=%d, page=%d, start=%d, nsector=%d buffer=%p\n", op->blk, op->page, start, nsector, buffer);
+ ret = __do_adfu_read(start, nsector, buffer);
+ break;
+ case BOOT_WRITE:
+// if(op->blk > 0)
+// {
+// ret = 0;
+// break;
+// }
+
+// printk("BOOT_WRITE block=%d, page=%d, start=%d, nsector=%d\n", op->blk, op->page, start, nsector);
+// DUMP_BUFFER(buffer, 0, nsector *512);
+ ret = __do_adfu_write(start, nsector, buffer);
+ break;
+ case BOOT_ERASE:
+ break;
+ default:
+ printk("%s,%d,err!check!\n",__FUNCTION__,__LINE__);
+ }
+
+ return ret;
+}
+
+unsigned convert_to_sector(unsigned blk, unsigned page)
+{
+ unsigned secter;
+ secter = SEC_PER_BLOCK*blk + SEC_PER_PAGE*page + 1;
+ return secter;
+}
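+/*
+ * Worked example with the boot layout constants from tsd_block.h
+ * (SEC_PER_BLOCK = 256 * 8 = 2048, SEC_PER_PAGE = 8):
+ *   blk = 1, page = 2  =>  2048 * 1 + 8 * 2 + 1 = sector 2065
+ * The trailing "+ 1" skips sector 0.
+ */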
+
+int calculate_part_num(void)
+{
+ int ret;
+ int i;
+ partition_info_t *mbrinfo_partition = NULL;
+
+ if(is_for_upgrade){
+ mbrinfo_partition = GetMbrFromUser();
+ }else{
+ ret = ReadAfinfo();
+ if (ret != 0) {
+ printk("read mbr from tsd error\n");
+ return -1;
+ }else{
+ printk("%s:ReadAfinfo success\n",__FUNCTION__);
+ }
+ p_afinfo = (afinfo_t *)(mbrc+AFINFO_OFFSET_IN_MBRC_BIN);
+ mbrinfo_partition = (partition_info_t *)(&p_afinfo->partition_info);
+ }
+
+ if(mbrinfo_partition == NULL){
+ printk("%s,%d,get mbrinfo error!\n", __FUNCTION__, __LINE__);
+ return -2;
+ }
+
+ for(i = 0; i < MAX_PARTITION; i++){
+ printk("ptn=0x%x,part_num=0x%x,reserved=0x%x,part_cap=0x%x\n", \
+ mbrinfo_partition[i].flash_ptn,mbrinfo_partition[i].partition_num,\
+ mbrinfo_partition[i].reserved,mbrinfo_partition[i].partition_cap);
+ }
+
+ i = 0;
+ do{
+
+ if(mbrinfo_partition[i].partition_num == 0xFF){
+ i -= 1;
+ break;
+ }
+ /* boot up will check this*/
+ if(i == MAX_PARTITION){
+ i = MAX_PARTITION - 1;
+ break;
+ }
+
+ i++;
+
+ }while(1);
+
+ partition__logic_incard = i;
+ partition_inmbr = partition__logic_incard + 1;
+
+ printk("%s %d,partition_incard:%d,partition_inmbr:%d\n", \
+ __FUNCTION__, __LINE__, partition__logic_incard,partition_inmbr);
+ //include boot part
+ capinfo = (partition_info_t *) kzalloc (partition_inmbr * sizeof(partition_info_t), GFP_KERNEL);
+ //logic part
+ tSD_part = (struct tSD_partinfo *) kzalloc (partition__logic_incard * sizeof(struct tSD_partinfo), GFP_KERNEL);
+ //logic part
+ tSD_device_md.partitions = (tSD_partition_t *) kzalloc (partition__logic_incard * sizeof (tSD_partition_t), GFP_KERNEL);
+
+ if(capinfo == NULL || tSD_part == NULL || tSD_device_md.partitions == NULL){
+ printk("%s, %d, error in allocation!\n", __FUNCTION__, __LINE__);
+ }
+
+ return partition__logic_incard;
+}
+
+
+int init_board_cfg(void)
+{
+ int ret = 0;
+ ret = get_afi_configuration();
+ return ret;
+}
+int get_afi_configuration()
+{
+ int ret = TRUE;
+ partition_info_t *mbrinfo_partition;
+
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+
+ if(is_for_upgrade == 1)
+ {
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+ mbrinfo_partition = GetMbrFromUser();
+ if(!mbrinfo_partition)
+ {
+ printk("GET MBR FROM USER CALLER ERROR!!\n");
+ return ret;
+ }
+
+ MEMCPY(capinfo, mbrinfo_partition, partition_inmbr * sizeof(partition_info_t));
+ }
+ else
+ {
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+ ret = ReadAfinfo();
+ if (ret != 0)
+ {
+ printk("READ MBR FROM NAND ERROR!!\n");
+ return ret;
+ }
+ p_afinfo = (afinfo_t *)(mbrc+AFINFO_OFFSET_IN_MBRC_BIN);
+ MEMCPY(capinfo,&(p_afinfo->partition_info), partition_inmbr * sizeof(partition_info_t));
+ //DATA_BLK_NUM_PER_ZONE = p_afinfo->DataBlkNumInBoot;
+ }
+
+ // g_pcba_test_flag = p_afinfo->pcba_test;
+// printk("AFI config: ce=0x%x, ce-ex= 0x%x,clk=%d, paddrv=0x%x.\n",g_nand_ceconfig, g_nand_ceconfig_ex, g_max_clk_config, g_paddrv_config);
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+ return ret;
+}
+
+partition_info_t *GetMbrFromUser(void)
+{
+ int read_cnt = 0;
+ /* path_from_caller = "/usr/mbr_info.bin"; */
+ mm_segment_t old_fs;
+ struct file *file = NULL;
+ partition_info_t * tmp_partition_info;
+ UINT8 *tmp_p;
+ mbr_info_t* mbr_info = (mbr_info_t *)MALLOC(MBR_SIZE);
+ char *path_from_caller = "/usr/mbr_info.bin"; //2012-7-7 file does not exist yet
+ //char *path_from_caller = "/misc/mbr_info.bin"; //2012-7-7 file does not exist yet
+
+ memset(mbr_info,0,MBR_SIZE);
+ old_fs = get_fs();
+ set_fs(get_ds());
+ file = filp_open(path_from_caller, O_RDONLY, 0); //2012-7-7 11:54 returns an error pointer (e.g. 0xfffffffe), not NULL
+
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+
+ if(IS_ERR(file)){
+ printk("%s:OPEN FILE ERROR\n",__FUNCTION__);
+ return NULL;
+ }
+ if (file->f_op->read ==NULL) {
+ printk("FILE CAN'T BE READ!!\n");
+ return NULL;
+ }
+
+ read_cnt = file->f_op->read(file, (unsigned char *)mbr_info, MBR_SIZE, &file->f_pos);
+ if(read_cnt != MBR_SIZE) {
+ printk("ONLY READ %d !!!\n", read_cnt);
+ return NULL;
+ }else {
+ printk("READ MBR_INFO SUCCESSFULLY !!\n");
+ tmp_p = (UINT8*)mbr_info;
+ filp_close(file,NULL);
+ set_fs(old_fs);
+ }
+
+ tmp_partition_info = mbr_info->partition_info;
+ //FREE(mbr_info);
+
+ return tmp_partition_info;
+}
+
+unsigned int GetAfiFromUser(afinfo_t * afinfo)
+{
+ int read_cnt = 0;
+ mm_segment_t old_fs;
+ struct file *file = NULL;
+ unsigned int ret = 0;
+
+ char * path_from_caller = "/usr/afinfo.bin";
+ old_fs = get_fs();
+ set_fs(get_ds());
+ file = filp_open(path_from_caller, O_RDONLY, 0);
+ if (IS_ERR(file)) {
+ printk("OPEN FILE ERROR!!\n");
+ return -1;
+ }
+ if (file->f_op->read ==NULL) {
+ printk("FILE CAN'T BE READ!!\n");
+ return -1;
+ }
+ read_cnt = file->f_op->read(file, (unsigned char *)afinfo, MBR_SIZE, &file->f_pos);
+ if(read_cnt != MBR_SIZE) {
+ printk("ONLY READ %d !!!\n", read_cnt);
+ return -1;
+ }else {
+ printk("READ AFI_INFO SUCCESSFULLY !!\n");
+ filp_close(file,NULL);
+ set_fs(old_fs);
+ }
+
+ return ret;
+}
+
+/*
+ * read afinfo from nand (mbrc), and update the
+ * corresponding file (mbrc.bin) on the rootfs when upgrading
+ */
+unsigned int UpdateMbrFromPhyToUsr(unsigned int *p_nand_part, mbr_info_t *p_mbr_info)
+{
+ int i,ret = 0, hdcpKey = 0;
+ /*
+ * check if the hdcp key has been burned; if it has, update /usr/mbrc_info.bin,
+ * mark it and tell production to halt the process
+ */
+ if(owl_hdcp_is_burn() == 1){
+ printk("%s:hdcp is already burned\n",__FUNCTION__);
+ hdcpKey = 1 ;
+ }
+
+ ret = ReadAfinfo();//read capinfo from phy
+ if (ret != 0 || is_force_format == 1) {
+ MEMCPY(capinfo,&(p_mbr_info->partition_info), partition_inmbr * sizeof(partition_info_t));
+ printk("==================================================\n");
+ for(i = 0; i < 13; i++){
+ printk("ptn=0x%x,part_num=0x%x,reserved=0x%x,part_cap=0x%x\n", \
+ capinfo[i].flash_ptn,capinfo[i].partition_num,\
+ capinfo[i].reserved,capinfo[i].partition_cap);
+ }
+ printk("No valid AFINFO on nand(%s)\n", __FUNCTION__);
+ ret = 1;
+ }else{
+ p_afinfo = (afinfo_t *)(mbrc+AFINFO_OFFSET_IN_MBRC_BIN);
+ MEMCPY(capinfo,&(p_afinfo->partition_info), partition_inmbr * sizeof(partition_info_t));
+ ret = UpdateMbrToUsr(capinfo, NULL);
+ }
+
+ ret |= init_tSD_part_myself();
+ for(i=0; i<partition__logic_incard;i++){
+ p_nand_part[i] = tSD_part[i].partsize/(2*1024);
+ printk("i=%d, p_nand_part[i]=%d, capinfo[i].partition_cap=%d\n", i, p_nand_part[i], capinfo[i].partition_cap);
+ }
+
+ if(hdcpKey){
+ ret = -2; //will be recognized & handled in Production.py
+ }
+
+ return ret;
+}
+
+/*
+ * copy/update information from the batch production tool
+ * into mbrc_info.bin in the fs, so that the information
+ * can be written to nand in the end;
+ * upgrade.app incorporates this information
+ */
+unsigned int UpdateMbrToUsr(partition_info_t * partition_info_tbl, afinfo_t *p_afi)
+{
+ int read_cnt = 0;
+ int i,write_cnt = 0, mbrinfo_update_flag=0;
+ mm_segment_t old_fs;
+ struct file *file = NULL;
+// partition_info_t * tmp_partition_info;
+ unsigned int ret = 0;
+ mbr_info_t* mbr_info = (mbr_info_t *)MALLOC(MBR_SIZE);
+ char * path_from_caller = "/usr/mbr_info.bin";
+
+ memset(mbr_info,0,MBR_SIZE);
+ old_fs = get_fs();
+ set_fs(get_ds());
+ file = filp_open(path_from_caller, O_RDWR, 0);
+ if (IS_ERR(file)) {
+ printk("OPEN FILE ERROR!!\n");
+ ret = -1;
+ goto out;
+ }
+ if (file->f_op->read ==NULL) {
+ printk("FILE CAN'T BE READ!!\n");
+ ret = -1;
+ goto out;
+ }
+ read_cnt = file->f_op->read(file, (unsigned char *)mbr_info, MBR_SIZE, &file->f_pos);
+ if(read_cnt != MBR_SIZE) {
+ printk("ONLY READ %d !!!\n", read_cnt);
+ ret = -1;
+ goto out;
+ }
+
+ for(i = 0; i < partition_inmbr; i++)
+ {
+ //update cap from phy to mbr
+ if((mbr_info->partition_info[i].partition_cap == 0)
+ && (partition_info_tbl[i].partition_cap != 0))
+ {
+ printk("update cap from phy to mbr,partition[%d],mbr:0x%x,phy:0x%x\n", i, mbr_info->partition_info[i].partition_cap, partition_info_tbl[i].partition_cap);
+ mbr_info->partition_info[i].partition_cap = partition_info_tbl[i].partition_cap;
+ mbrinfo_update_flag = 1;
+ }
+
+ //update cap from mbr to phy
+ if((mbr_info->partition_info[i].partition_cap != 0)
+ && (partition_info_tbl[i].partition_cap == 0))
+ {
+ printk("update cap from mbr to phy,partition[%d],mbr:0x%x,phy:0x%x\n", i, mbr_info->partition_info[i].partition_cap, partition_info_tbl[i].partition_cap);
+ partition_info_tbl[i].partition_cap = mbr_info->partition_info[i].partition_cap;
+ ret |= 1;
+ }
+
+ if(mbr_info->partition_info[i].partition_cap > partition_info_tbl[i].partition_cap)
+ {
+ printk("too large,partition[%d],mbr:0x%x,phy:0x%x\n", i, mbr_info->partition_info[i].partition_cap, partition_info_tbl[i].partition_cap);
+ ret = -1;
+ goto out;
+ }
+ else
+ {
+ printk("update cap from phy to mbr,partition[%d],mbr:0x%x,phy:0x%x\n", i, mbr_info->partition_info[i].partition_cap, partition_info_tbl[i].partition_cap);
+ mbr_info->partition_info[i].partition_cap = partition_info_tbl[i].partition_cap;
+ mbrinfo_update_flag = 1;
+ }
+ }
+
+ /*
+ * update hdcp key & serial no. from mbrc afi to mbrc_info.bin;
+ * as the sn is now copied to the misc info block (BUG00109876), this code is obsolete
+ */
+ if(0)
+ {
+ //MEMCPY(&mbr_info->HdcpKey, &p_afi->HdcpKey, 308);
+ MEMCPY(&mbr_info->SerialNo, &p_afi->sn, 16);
+ dump_mem(&p_afi->sn, 16, 0, 1 );
+ }
+
+ if (file->f_op->write ==NULL) {
+ printk("FILE CAN'T BE WRITTEN!!\n");
+ ret = -1;
+ goto out;
+ }
+
+ if(mbrinfo_update_flag == 1)
+ {
+ file->f_pos = 0;
+ write_cnt = file->f_op->write(file, (unsigned char *)mbr_info, MBR_SIZE, &file->f_pos);
+ if(write_cnt != MBR_SIZE) {
+ printk("ONLY WRITE %d !!!\n", write_cnt);
+ ret = -1;
+ }
+ }
+out://
+ filp_close(file,NULL);
+ set_fs(old_fs);
+
+ kfree(mbr_info);
+ return ret;
+}
+
+
+int owl_part_table_parse(void)
+{
+ int i;
+ unsigned int fixed_cap = 0;
+ sector_t logic_cap;
+
+ for (i = 0; i < partition__logic_incard; i++){
+ if((capinfo [i + 1].partition_cap == 0xffffffff)&&\
+ (capinfo[i+1].partition_num != 0xFF)){
+ unassign_partnum = i;
+ capinfo[i+1].partition_cap = 0;
+ printk ("%s,unassign_partnum : %d\n", __FUNCTION__, unassign_partnum);
+ }
+
+ tSD_part[i].partsize = capinfo[i+1].partition_cap;
+ tSD_part[i].type = PART_FREE;
+
+ fixed_cap += tSD_part[i].partsize; // in MBbyte
+ }
+
+ fixed_cap = 2 * 1024 * fixed_cap; // in sector
+ logic_cap = tSD_get_logic_cat();
+
+ if (fixed_cap >= logic_cap){
+ printk("[EMMC/TSD] Not enough space for partition(0-%d). Need(0x%08x), Free(0x%08llx).\n", (UDISK_ACCESS - 1), fixed_cap, logic_cap);
+ return -1;
+ }
+ /*
+ * the reserved 2 MB is for hdcp/drm etc. key storage
+ * FIXME: has problems?
+ */
+ tSD_part[unassign_partnum].partsize = (logic_cap/(2*1024))-(fixed_cap/(2*1024)) ;
+ tSD_part[unassign_partnum].type = PART_FREE;
+ //restore capinfo partition cap
+ capinfo[unassign_partnum+1].partition_cap = tSD_part[unassign_partnum].partsize ;
+
+ /* adjust partition offset */
+ for (i = 0; i < partition__logic_incard; i++){
+ tSD_part[i].off_size = get_cap_offset(i);
+ }
+
+ return 0;
+}
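+/*
+ * Unit note / example: partsize and partition_cap are in MB here, and
+ * fixed_cap is converted to 512-byte sectors by "2 * 1024 * fixed_cap";
+ * e.g. if the fixed partitions sum to 100 MB, fixed_cap = 204800 sectors,
+ * and whatever is left of logic_cap goes to the unassigned partition.
+ */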
+
+
+int init_tSD_part_myself(void)
+{
+ int i;
+ int ret = 0;
+
+ printk("%s %d\n", __FUNCTION__, __LINE__);
+
+ if(owl_part_table_parse()){
+ printk("err: owl_part_table_parse failed\n");
+ return -1 ;
+ }
+
+
+ printk("----------------tSD_part-------------\n");
+ for (i = 0; i < partition__logic_incard; i++)
+ {
+ tSD_part[i].off_size=tSD_part[i].off_size * 2 * 1024;
+ tSD_part[i].partsize=tSD_part[i].partsize * 2 * 1024;
+ printk("%d(%c).\t offset: 0x%8lx \t size:%lu(MB) \t type:0x%x \n",
+ i, i+'a', tSD_part[i].off_size,
+ tSD_part[i].partsize / 2048,
+ tSD_part[i].type);
+ }
+ printk("----------------tSD_part-------------\n");
+
+
+ return ret;
+
+ //dump_mem(tSD_part, sizeof(tSD_part), 0, 4);
+}
+
+static unsigned int get_cap_offset(int part)
+{
+ int i;
+ unsigned int part_cap = 0;
+
+ for (i = 1; i < (part+1); i++)
+ {
+ part_cap += capinfo[i].partition_cap;
+ }
+ return part_cap;
+}
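+/*
+ * Example: get_cap_offset(2) returns
+ * capinfo[1].partition_cap + capinfo[2].partition_cap, i.e. the offset
+ * (in MB) of logical partition 2, skipping the boot entry capinfo[0].
+ */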
+
+//get the logic size of the tSD card, not including the phy partition
+//todo
+unsigned int tSD_get_logic_cat(void)
+{
+ unsigned int tSD_logic_size;
+
+ tSD_logic_size = card_total_size;
+
+ if (tSD_logic_size > ((4 * 1024 * 1024) >> 9)+ ((2 * 1024 * 1024) >> 9)) {
+ /* reserve 4MB for mbrec plus 2MB of reserved space */
+
+
+ tSD_logic_size = tSD_logic_size-\
+ (((4 * 1024 * 1024) >> 9) + ((2 * 1024 * 1024) >> 9));
+ printk("card_total_size:%u,tSD_logic_size:%u\n",card_total_size,tSD_logic_size);
+ }
+ return tSD_logic_size;
+}
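+/*
+ * Reservation arithmetic: (4 * 1024 * 1024) >> 9 = 8192 sectors (4 MB for
+ * mbrec) plus (2 * 1024 * 1024) >> 9 = 4096 sectors (2 MB reserved space),
+ * so 12288 sectors are held back from card_total_size.
+ */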
+
+int tSD_adfu_read(unsigned long start, unsigned long nsector, void *buf, struct uparam * adfu_uparam)
+{
+ unsigned int flash_part = adfu_uparam->flash_partition;
+
+// down(&nand_blk_device.mutex);
+
+ if ((start + nsector) > tSD_part[flash_part].partsize) {
+// up(&nand_blk_device.mutex);
+ return -1;
+ }
+ //todo:add the phy off_size
+ start += tSD_part[flash_part].off_size + SEC_PHY_BLOCK;
+
+ if (__do_adfu_read(start, nsector, buf))
+ {
+ printk("read err\n");
+// up(&nand_blk_device.mutex);
+ return -1;
+ }
+// up(&nand_blk_device.mutex);
+ return nsector;
+}
+//EXPORT_SYMBOL(tSD_adfu_read);
+
+int tSD_adfu_write(unsigned long start, unsigned long nsector, void *buf, struct uparam * adfu_uparam)
+{
+ unsigned int flash_part = adfu_uparam->flash_partition;
+
+// down(&nand_blk_device.mutex);
+
+ if ((start + nsector) > tSD_part[flash_part].partsize) {
+ printk("%s: Error: operation exceeds partition size\n",__FUNCTION__);
+ return -1;
+ }
+ //todo:add the phy partition off_size
+ start += tSD_part[flash_part].off_size + SEC_PHY_BLOCK;
+
+// printk("flash_part=%d, adfu_write(start=%d, nsector%d)\n", flash_part, start, nsector);
+ if (__do_adfu_write(start, nsector, buf))
+ {
+ printk("write err\n");
+// up(&nand_blk_device.mutex);
+ return -1;
+ }
+// up(&nand_blk_device.mutex);
+ return nsector;
+}
+//EXPORT_SYMBOL(tSD_adfu_write);
+
+unsigned int tSD_op_read(unsigned long start, unsigned long nsector, void *buf, struct inode * i)
+{
+ struct uparam adfu_uparam;
+
+ adfu_uparam.flash_partition = i->i_bdev->bd_disk->first_minor >> 3;
+
+ tSD_adfu_read(start, nsector, buf, &adfu_uparam);
+
+ return 0;
+}
+
+unsigned int tSD_op_write(unsigned long start, unsigned long nsector, void *buf, struct inode * i)
+{
+ struct uparam adfu_uparam;
+
+ adfu_uparam.flash_partition = i->i_bdev->bd_disk->first_minor >> 3;
+
+ tSD_adfu_write(start, nsector, buf, &adfu_uparam);
+
+ return 0;
+}
+unsigned int ReadAfinfo()
+{
+ int i,j;
+ unsigned int ret = -1;
+ unsigned int MbrPageNum;
+ unsigned short checksum;
+ unsigned short checksum1;
+ unsigned short flag ;
+ boot_op_t op;
+
+ MbrPageNum = (MBRC_SECTOR_SIZE-1 + SEC_PER_BOOT_PAGE)/SEC_PER_BOOT_PAGE;
+ mbrc = kmalloc(MbrPageNum*SEC_PER_BOOT_PAGE*512, GFP_KERNEL); /* mbr_info occupies 2 sectors */
+ if(mbrc == NULL)
+ {
+ printk("Boot Malloc Error!!\n");
+ return 1;
+ }
+ else
+ {
+ printk("Boot Malloc %x, MbrPageNum=%d, %d!!\n",(unsigned int)mbrc, MbrPageNum, MbrPageNum*SEC_PER_BOOT_PAGE*512);
+ }
+
+ for (i = 0; i < 4; i++)
+ {
+ op.buffer = mbrc;
+ op.blk = i;
+ for (j = 0;j < MbrPageNum; j++)
+ {
+ op.page = j;
+ ret = boot_phy_op_entry(&op, BOOT_READ);
+ op.buffer += SEC_SIZE * SEC_PER_BOOT_PAGE;
+ }
+
+ checksum = (unsigned int)calCRC(mbrc + 0x400, (MBRC_SIZE-0x400-4), 4) + 0x1234;
+ checksum1 = *(unsigned int*)(mbrc+MBRC_SIZE-4);
+ flag = *(unsigned short*)(mbrc+MBRC_SIZE-6);
+
+ if ((checksum==checksum1)&&(flag==0x55aa))
+ {
+ printk("Read mbrc checksum success\n");
+ ret = 0;
+ break;
+ }
+ else
+ {
+ ret = -1;
+ //printk("Read mbrc checksum failed: calsum:0x%08x,srcsum:0x%08x,flag:0x%08x\n",\
+ // checksum,checksum1,flag);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * calCRC - calculate an additive checksum in nBytes-wide units
+ * @buf: data buffer
+ * @length: data length in bytes
+ * @nBytes: unit width in bytes; 2-byte and 4-byte units are handled directly
+ */
+unsigned int calCRC(unsigned char *buf, unsigned int length, unsigned char nBytes)
+{
+ unsigned int i=0,j=0,checkSum=0;
+ unsigned short checkSumShort=0;
+
+ if((length==0) || (nBytes==0))
+ {
+ return 0;
+ }
+
+ if(nBytes == 2)
+ {
+ for(i=0; i<(length/2); i++)
+ {
+ checkSumShort += ((unsigned short *)buf)[i];
+ }
+ return checkSumShort;
+ }
+ else if(nBytes == 4)
+ {
+ for(i=0; i<(length/4); i++)
+ {
+ checkSum += ((unsigned int *)buf)[i];
+ }
+ return checkSum;
+ }
+ else
+ {
+ for(i=0; i<(length/nBytes); i++)
+ {
+ for(j=0; j<nBytes; j++)
+ {
+ checkSum += (buf[nBytes*i+j])<<(j*8);
+ }
+ }
+ return checkSum;
+ }
+}
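+/*
+ * Example: with nBytes = 4 and an 8-byte buf holding the words 0x11111111
+ * and 0x22222222, calCRC() returns 0x33333333 (despite the name it is a
+ * plain additive checksum, not a true CRC).
+ */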
+
+
+/**
+ * cal_key_checksum - calculate a key checksum in nBytes-wide units
+ * @buf: data buffer
+ * @length: data length in bytes
+ * @nBytes: unit width in bytes; 2-byte and 4-byte units are handled directly
+ */
+unsigned int cal_key_checksum(unsigned char *buf, unsigned int length, unsigned char nBytes)
+{
+ unsigned int i=0,j=0,checkSum=0;
+ unsigned short checkSumShort=0;
+
+ if((length==0) || (nBytes==0))
+ {
+ return 0;
+ }
+
+
+ #if 0
+ for(i=0; i<(length/nBytes); i++)
+ {
+ for(j=0; j<nBytes; j++)
+ {
+ checkSum += (buf[(nBytes*i)+j]<<(j*8));
+
+ if(nBytes == 2)
+ {
+ checkSumShort += ((unsigned short)buf[(nBytes*i)+j]<<(j*8));
+ }
+ }
+ }
+
+ if(nBytes == 2)
+ {
+ return checkSumShort;
+ }
+ return checkSum;
+
+ #else
+ if(nBytes == 2)
+ {
+ for(i=0; i<(length/2); i++)
+ {
+ checkSumShort += ((unsigned short *)buf)[i];
+ }
+ return checkSumShort ^ 0x55aa;
+ }
+ else if(nBytes == 4)
+ {
+ for(i=0; i<(length/4); i++)
+ {
+ checkSum += ((unsigned int *)buf)[i];
+ }
+ return checkSum ^ 0x55aa;
+ }
+ else
+ {
+ for(i=0; i<(length/nBytes); i++)
+ {
+ for(j=0; j<nBytes; j++)
+ {
+ checkSum += (buf[nBytes*i+j])<<(j*8);
+ }
+ }
+ return checkSum ^ 0x55aa;
+ }
+
+ #endif
+}
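+/*
+ * Same additive sum as calCRC() above, XORed with 0x55aa; for the calCRC()
+ * example value 0x33333333 this would give 0x33336699.
+ */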
+
+
+
+ #if 0
+/*
+ *
+ */
+
+int NAND_MiscInfoBlkBakup(unsigned char *buf)
+{
+ int ret = 0;
+
+ __do_adfu_read(card_total_size - 2048 + 1, 32, buf);
+ __do_adfu_write(card_total_size - 2048 + 1 + 32, 32, buf);
+
+ return ret;
+}
+#endif
+
+/*
+ * returns 1 if the hdcp key has been burned,
+ * anything else means not burned
+ */
+
+static int owl_hdcp_is_burn(void)
+{
+ int count=0;
+
+ unsigned int offset;
+ struct MiscInfoType_t Hdcp;
+
+ offset = (unsigned int )(&MiscInfo.Hdcp) - (unsigned int )(&MiscInfo);
+ count = do_rw_miscinfo(offset,(char*)(&Hdcp),
+ sizeof(struct MiscInfoType_t),MISC_INFO_READ);
+ if(count != sizeof(struct MiscInfoType_t)){
+ printk("err:%d:read owl_hdcp_is_burn fail\n",count);
+ return -1 ;
+ }
+
+ if(Hdcp.Burn== BURN_FLAG){
+ return 1;
+ }else{
+ return 0;
+ }
+
+}
+
+static int owl_miscinfo_is_burn(void)
+{
+ int count=0;
+
+ unsigned int offset;
+ unsigned int burn = 0;
+
+ offset = (unsigned int )(&MiscInfo.Burn) - (unsigned int )(&MiscInfo);
+ count = do_rw_miscinfo(offset,(char*)(&burn),
+ sizeof(burn),MISC_INFO_READ);
+ if(count != sizeof(burn)){
+ printk("err:%d:read miscinfo_is_burn fail\n",count);
+ return -1 ;
+ }
+
+ if(burn== BURN_FLAG){
+ return 1;
+ }else{
+ return 0;
+ }
+
+}
+/*
+ * wr_flag : MISC_INFO_READ (read) or MISC_INFO_WRITE (write)
+ */
+static int do_rw_miscinfo (unsigned int offset, char *buf, int size, int wr_flag)
+{
+ unsigned int mf_offset;
+ unsigned int mf_sector_num, mf_sector_cnt;
+ unsigned int buf_start;
+ char *addr = NULL;
+ int ret = 0;
+
+ //printk("%s, %s %d bytes in %s\n", __FUNCTION__, (wr_flag ? "read" : "write"), size, mf_type->Name);
+
+ //mf_offset = mf_type->Offset;
+ //mf_size = mf_type->Size;
+
+ mf_offset = offset;
+
+
+ //if (size > mf_size) {
+ // printk (KERN_ERR"wite %d bytes to %s, max_size is %d!\n", size, mf_type->Name, mf_type->Size);
+ // return -ENOMEM;
+ //}
+
+ mf_sector_num = mf_offset / 512;
+ mf_sector_cnt = size / 512;
+ buf_start = mf_offset % 512;
+
+ if (mf_offset % 512 || size % 512) {
+ if ((mf_offset % 512 + size % 512) > 512)
+ mf_sector_cnt += 2;
+ else
+ mf_sector_cnt += 1;
+ }
+
+ addr = (char *)kmalloc(mf_sector_cnt * 512, GFP_KERNEL);
+ if (addr == NULL){
+ printk (KERN_ERR"kmalloc %d bytes err!\n", mf_sector_cnt * 512);
+ return -ENOMEM;
+ }
+ ret = __do_adfu_read(miscinfo_start_addr + mf_sector_num, mf_sector_cnt, addr);
+
+
+ //printk ("read from emmc:\n" );
+ //dump_mem_f(addr + buf_start,size);
+//
+ if (wr_flag == MISC_INFO_READ){
+ memcpy(buf, addr + buf_start, size);
+ } else {
+ memcpy(addr + buf_start, buf, size);
+ ret = __do_adfu_write(miscinfo_start_addr + mf_sector_num, mf_sector_cnt, addr);
+ //write the backup copy
+ ret = __do_adfu_write(miscinfo_start_addr + mf_sector_num + MISC_INFO_SECTERS, mf_sector_cnt, addr);
+
+ udelay(10);
+ ret = __do_adfu_read(miscinfo_start_addr + mf_sector_num, mf_sector_cnt, addr);
+ //printk ("read after write emmc:\n");
+ //dump_mem_f(addr + buf_start,size);
+ }
+
+ if (addr)
+ kfree(addr);
+
+ return size;
+}
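+/*
+ * Alignment example: for offset = 520 and size = 600, mf_sector_num = 1,
+ * buf_start = 8 and mf_sector_cnt rounds up to 2, so sectors 1-2 of the
+ * misc info area (1024 bytes) are read and the 600 requested bytes are
+ * copied starting at byte 8 of that bounce buffer.
+ */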
+
+/*
+ * write misc info block
+ * [in] misc info type
+ * [in] requested buffer for the misc info
+ * [in] requested data size
+ */
+
+
+
+int NAND_WriteMiscInfo(int type, char *buf, int size)
+{
+ int count=0;
+ int ret=0;
+ unsigned int offset;
+
+ _miscMetuxLock();
+
+
+ printk("NAND_WriteMiscInfo size %d\n",size);
+
+ switch (type){
+ case MISC_INFO_TYPE_SN:
+ //write misc info value
+ if(size > SN_SIZE){
+ size = SN_SIZE;
+ }
+
+ count = do_rw_miscinfo(MiscInfo.Sn.Offset,buf,size,MISC_INFO_WRITE);
+ if(count!= size){
+ printk("err:%d,write Sn do_rw_miscinfo\n",count);
+ return -1;
+ }
+ MiscInfo.Sn.Size = count;
+ MiscInfo.Sn.Burn = BURN_FLAG;
+ offset = (unsigned int)(&MiscInfo.Sn) - (unsigned int)(&MiscInfo);
+ ret = do_rw_miscinfo(offset,(char*)(&MiscInfo.Sn),\
+ sizeof(struct MiscInfoType_t),MISC_INFO_WRITE);
+ if(ret != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,write Sn struct MiscInfoType_t\n",ret);
+ return -1;
+ }
+
+ break;
+ case MISC_INFO_TYPE_DRM:
+ //write misc info value
+ if(size > DRM_KEY_SIZE){
+ size = DRM_KEY_SIZE;
+ }
+
+ count = do_rw_miscinfo(MiscInfo.Drm.Offset,buf,size,MISC_INFO_WRITE);
+ if(count!= size){
+ printk("err:%d,write Drm do_rw_miscinfo\n",count);
+ return -1;
+ }
+ MiscInfo.Drm.Size = count;
+ MiscInfo.Drm.Burn = BURN_FLAG;
+ offset = (unsigned int)(&MiscInfo.Drm) - (unsigned int)(&MiscInfo);
+ ret = do_rw_miscinfo(offset,(char*)(&MiscInfo.Drm),\
+ sizeof(struct MiscInfoType_t),MISC_INFO_WRITE);
+ if(ret != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,write Drm struct MiscInfoType_t\n",ret);
+ return -1;
+ }
+
+ break;
+ case MISC_INFO_TYPE_HDCP:
+ //write misc info value
+ if(size > HDCP_KEY_SIZE){
+ size = HDCP_KEY_SIZE;
+ }
+
+ count = do_rw_miscinfo(MiscInfo.Hdcp.Offset,buf,size,MISC_INFO_WRITE);
+ if(count!= size){
+ printk("err:%d,write Hdcp do_rw_miscinfo\n",count);
+ return -1;
+ }
+ MiscInfo.Hdcp.Size = count;
+ if(MiscInfo.Hdcp.Size == HDCP_KEY_SIZE){
+ MiscInfo.Hdcp.Burn = BURN_FLAG;
+ }else{
+ MiscInfo.Hdcp.Burn = 0;
+ }
+
+ offset = (unsigned int)(&MiscInfo.Hdcp) - (unsigned int)(&MiscInfo);
+ ret = do_rw_miscinfo(offset,(char*)(&MiscInfo.Hdcp),\
+ sizeof(struct MiscInfoType_t),MISC_INFO_WRITE);
+ if(ret != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,write Hdcp struct MiscInfoType_t\n",ret);
+ return -1;
+ }
+ break;
+
+ case MISC_INFO_TYPE_DEVNUM:
+ //write misc info value
+ if(size > DEVNUM_SIZE){
+ size = DEVNUM_SIZE;
+ }
+
+ count = do_rw_miscinfo(MiscInfo.DevNum.Offset,buf,size,MISC_INFO_WRITE);
+ if(count!= size){
+ printk("err:%d,write DevNum do_rw_miscinfo\n",count);
+ return -1;
+ }
+ MiscInfo.DevNum.Size = count;
+ MiscInfo.DevNum.Burn = BURN_FLAG;
+ offset = (unsigned int)(&MiscInfo.DevNum) - (unsigned int)(&MiscInfo);
+ ret = do_rw_miscinfo(offset,(char*)(&MiscInfo.DevNum),\
+ sizeof(struct MiscInfoType_t),MISC_INFO_WRITE);
+ if(ret != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,write DevNum struct MiscInfoType_t\n",ret);
+ return -1;
+ }
+
+ break;
+
+ case MISC_INFO_TYPE_EXT:
+ //write misc info value
+ if(size > EXTSPACE_SIZE){
+ size = EXTSPACE_SIZE;
+ }
+
+ count = do_rw_miscinfo(MiscInfo.ExtSpace.Offset,buf,size,MISC_INFO_WRITE);
+ if(count!= size){
+ printk("err:%d,write ExtSpace do_rw_miscinfo\n",count);
+ return -1;
+ }
+ MiscInfo.ExtSpace.Size = count;
+ MiscInfo.ExtSpace.Burn = BURN_FLAG;
+ offset = (unsigned int)(&MiscInfo.ExtSpace) - (unsigned int)(&MiscInfo);
+ ret = do_rw_miscinfo(offset,(char*)(&MiscInfo.ExtSpace),\
+ sizeof(struct MiscInfoType_t),MISC_INFO_WRITE);
+ if(ret != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,write ExtSpace struct MiscInfoType_t\n",ret);
+ return -1;
+ }
+
+ break;
+
+ default:
+ printk (KERN_ERR"miscinfo write type not defined!\n");
+ break;
+ }
+#if 1
+ // change misc burn flag
+ offset = (unsigned int )(&MiscInfo.Burn) - (unsigned int )(&MiscInfo);
+ MiscInfo.Burn = BURN_FLAG;
+ count = do_rw_miscinfo (offset, (char*)(&MiscInfo.Burn), sizeof(MiscInfo.Burn), MISC_INFO_WRITE);
+ if(count < 0){
+ printk("%s:err:%d:backup miscinfo init state failed\n",__FUNCTION__,count);
+ return -1;
+ }
+#endif
+ _miscMetuxUnlock();
+
+ return count;
+}
+
+/*
+ * must hold semaphore
+ */
+ int NAND_GetMiscInfo(int type, char *buf, int size)
+{
+ int count=0;
+ int read_size=0;
+ int headsize=0; //head_size
+ unsigned int offset;
+
+ switch (type) {
+ case MISC_INFO_TYPE_SN:
+ // get size
+ offset = (unsigned int)(&MiscInfo.Sn) - (unsigned int)(&MiscInfo);
+ headsize = do_rw_miscinfo (offset,(char*)(&MiscInfo.Sn),\
+ sizeof(struct MiscInfoType_t), MISC_INFO_READ);
+ if(headsize != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,read sn headsize\n",headsize);
+ return -1;
+ }
+
+ read_size = MiscInfo.Sn.Size;
+ if(size < read_size){
+ read_size = size;
+ }
+ printk("sn read size %d\n",read_size);
+
+ count = do_rw_miscinfo (MiscInfo.Sn.Offset, buf, read_size, MISC_INFO_READ);
+ if( count != read_size ){
+ printk("err:%d,read sn \n",count);
+ return -1;
+ }
+ break;
+
+ case MISC_INFO_TYPE_DRM:
+ // get size
+ offset = (unsigned int)(&MiscInfo.Drm) - (unsigned int)(&MiscInfo);
+ headsize = do_rw_miscinfo (offset,(char*)(&MiscInfo.Drm),\
+ sizeof(struct MiscInfoType_t), MISC_INFO_READ);
+ if(headsize != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,read Drm headsize\n",headsize);
+ return -1;
+ }
+
+ read_size = MiscInfo.Drm.Size;
+ if(size < read_size){
+ read_size = size;
+ }
+ printk("Drm read size %d\n",read_size);
+ count = do_rw_miscinfo (MiscInfo.Drm.Offset, buf, read_size, MISC_INFO_READ);
+ if( count != read_size ){
+ printk("err:%d,read Drm \n",count);
+ return -1;
+ }
+ break;
+
+ case MISC_INFO_TYPE_HDCP:
+ // get size
+ offset = (unsigned int)(&MiscInfo.Hdcp) - (unsigned int)(&MiscInfo);
+ headsize = do_rw_miscinfo (offset,(char*)(&MiscInfo.Hdcp),\
+ sizeof(struct MiscInfoType_t), MISC_INFO_READ);
+ if(headsize != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,read Hdcp headsize\n",headsize);
+ return -1;
+ }
+
+ read_size = MiscInfo.Hdcp.Size;
+ if(size < read_size){
+ read_size = size;
+ }
+ printk("Hdcp read size %d\n",read_size);
+ count = do_rw_miscinfo (MiscInfo.Hdcp.Offset, buf, read_size, MISC_INFO_READ);
+ if( count != read_size ){
+ printk("err:%d,read Hdcp \n",count);
+ return -1;
+ }
+
+ break;
+
+ case MISC_INFO_TYPE_DEVNUM:
+ // get size
+ offset = (unsigned int)(&MiscInfo.DevNum) - (unsigned int)(&MiscInfo);
+ headsize = do_rw_miscinfo (offset,(char*)(&MiscInfo.DevNum),\
+ sizeof(struct MiscInfoType_t), MISC_INFO_READ);
+ if(headsize != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,read DevNum headsize\n",headsize);
+ return -1;
+ }
+
+ read_size = MiscInfo.DevNum.Size;
+ if(size < read_size){
+ read_size = size;
+ }
+ printk("DevNum read size %d\n",read_size);
+ count = do_rw_miscinfo (MiscInfo.DevNum.Offset, buf, read_size, MISC_INFO_READ);
+ if( count != read_size ){
+ printk("err:%d,read DevNum \n",count);
+ return -1;
+ }
+ break;
+
+ case MISC_INFO_TYPE_EXT:
+ // get size
+ offset = (unsigned int)(&MiscInfo.ExtSpace) - (unsigned int)(&MiscInfo);
+ headsize = do_rw_miscinfo (offset,(char*)(&MiscInfo.ExtSpace),\
+ sizeof(struct MiscInfoType_t), MISC_INFO_READ);
+ if(headsize != sizeof(struct MiscInfoType_t)){
+ printk("err:%d,read ExtSpace headsize\n",headsize);
+ return -1;
+ }
+
+ read_size = MiscInfo.ExtSpace.Size;
+ if(size < read_size){
+ read_size = size;
+ }
+ printk("ExtSpace read size %d\n",read_size);
+ count = do_rw_miscinfo (MiscInfo.ExtSpace.Offset, buf, read_size, MISC_INFO_READ);
+ if( count != read_size ){
+ printk("err:%d,read ExtSpace \n",count);
+ return -1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return count;
+}
+//EXPORT_SYMBOL(NAND_GetMiscInfo);
+
+
+/*
+* FIXME: pls
+*/
+void NAND_InitMiscInfo(void)
+{
+ int ret = 0;
+ char *addr = NULL;
+ unsigned miscinfo_size;
+ unsigned int sector_cnt;
+
+ printk("[tsd/emmc]NAND_MISC_INFO ON\n");
+
+ _miscMetuxInit();
+
+ MiscInfo.Drm.Offset = sizeof(struct MiscInfoBlk_t);
+ MiscInfo.Hdcp.Offset = MiscInfo.Drm.Offset+ MiscInfo.Drm.Size;
+ MiscInfo.Sn.Offset = MiscInfo.Hdcp.Offset + MiscInfo.Hdcp.Size;
+ MiscInfo.DevNum.Offset = MiscInfo.Sn.Offset + MiscInfo.Sn.Size;
+ MiscInfo.ExtSpace.Offset = MiscInfo.DevNum.Offset + MiscInfo.DevNum.Size;
+
+ if (MiscInfo.TotalSize > (1024 * 1024)){
+ printk (KERN_ERR"%s, 1 MB for MiscInfo isn't enough, need: %d\n", __FUNCTION__, MiscInfo.TotalSize);
+ } else {
+ /* 1M for misc info, 1M for backup info */
+ miscinfo_start_addr = card_total_size - 2*MISC_INFO_SECTERS ;
+ if(owl_miscinfo_is_burn() == 1){
+ printk("owl_miscinfo is burn\n");
+ }else{
+ printk("owl_miscinfo is not burn\n");
+ /* init the misc_info space header */
+ miscinfo_size = sizeof (struct MiscInfoBlk_t);
+ sector_cnt = miscinfo_size / 512;
+ if (miscinfo_size % 512)
+ sector_cnt++;
+ addr = (char *) kmalloc (sector_cnt * 512, GFP_KERNEL);
+ if (addr == NULL){
+ printk (KERN_ERR"%s, alloc memory for misc_info failed!\n", __FUNCTION__);
+ return;
+ }
+ memset(addr, 0, miscinfo_size);
+ memcpy(addr, &MiscInfo, miscinfo_size);
+
+ ret = __do_adfu_write(miscinfo_start_addr, sector_cnt, addr);
+ if (ret)
+ printk (KERN_ERR"%s, write misc_info head data err!\n", __FUNCTION__);
+ //for backup function
+ ret = __do_adfu_write(miscinfo_start_addr+MISC_INFO_SECTERS, sector_cnt, addr);
+ if (ret){
+ printk (KERN_ERR"%s, write bak misc_info head data err!\n", __FUNCTION__);
+ }
+ if (addr){
+ kfree(addr);
+ }
+ }
+ }
+}
+
+/*
+ *
+ */
+void NAND_ShowMiscInfoAll(void)
+{
+ char *buf;
+ buf = MALLOC(512);
+ //NAND_GetMiscInfo(MISC_INFO_TYPE_SN);
+ NAND_GetMiscInfo(MISC_INFO_TYPE_HDCP, buf, 308);
+ dump_mem(buf, 308, 0 ,1);
+ FREE(buf);
+}
+
+
+module_init(mmc_blk_init);
+module_exit(mmc_blk_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
+
+
diff --git a/drivers/mmc/card/tsd_block.h b/drivers/mmc/card/tsd_block.h
new file mode 100755
index 0000000..e0a9efb
--- /dev/null
+++ b/drivers/mmc/card/tsd_block.h
@@ -0,0 +1,203 @@
+#ifndef TSD_BLOCK_H
+#define TSD_BLOCK_H
+
+#define PLATFORMINC ../../../../owl/platform/include
+#define BOOTINC ../../../../owl/platform/boot/include
+
+#include "../../../../owl/platform/include/asoc_ioctl.h"
+#include "../../../../owl/platform/include/mbr_info.h" //for CapInfo_t
+
+#ifndef TRUE
+#define TRUE 0
+#endif
+
+#ifndef FALSE
+#define FALSE 1
+#endif
+
+#ifndef NULL
+#define NULL (void *)0
+#endif
+
+
+
+struct MiscInfoType_t
+{
+ unsigned short Magic;
+ unsigned short InfoType; //probably not necessary
+ unsigned short Size; //size of one misc info
+ unsigned short Offset; //offset from the beginning
+ unsigned short Chksum;
+ unsigned int Burn;
+ unsigned char Name[64];
+};
+
+// MISC INFORMATION BLOCK
+struct MiscInfoBlk_t
+{
+ unsigned short die;
+ unsigned short sblk;
+ unsigned short sblkBak; //for backup
+ unsigned short TotalSize;
+ unsigned int Burn;
+ struct MiscInfoType_t Drm;
+ struct MiscInfoType_t Hdcp;
+ struct MiscInfoType_t Sn;
+ struct MiscInfoType_t DevNum;
+
+ struct MiscInfoType_t ExtSpace;
+ struct MiscInfoType_t Reserved[3];
+};
+
+#define BOOT_PHY_SIZE (1024*1024)
+#define MBRC_NUM 4
+#define BURN_FLAG 0xabcddcba
+#define SECTOR_SIZE 512
+
+#define FORMATBYTE 4096
+#define PSECTBYTE 512
+
+
+#define DISABLE_WRITE _IO('V',0)
+#define ENABLE_WRITE _IO('V',1)
+#define DISABLE_READ _IO('V',2)
+#define ENABLE_READ _IO('V',3)
+
+#define NAND_IS_SMALL_BLOCK 0
+#define DATA_BLK_NUM_PER_ZONE 0
+#define FLASH_TYPE_CARD (0x03)
+
+#define TSD_BLOCKSZ 512
+#define TSD_WRITE 1
+#define TSD_READ 0
+
+//BOOT INFO:define in tSD card
+#define SEC_SIZE 512 //in bytes
+#define PAGE_PER_BLK 256
+#define SEC_PER_PAGE 8 // 4k
+#define SEC_PER_BOOT_PAGE 8
+#define SEC_PER_BLOCK (PAGE_PER_BLK * SEC_PER_PAGE) // 1M
+#define BLOCK_IN_PHY 4
+#define SEC_PHY_BLOCK (SEC_PER_BLOCK * BLOCK_IN_PHY) // 4M
+#define LOGIC_SEC_START SEC_PHY_BLOCK
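+/*
+ * With the defaults above: one page = 8 * 512 B = 4 KB, one block =
+ * 256 pages = 1 MB, and the boot/phy area = 4 blocks = 4 MB, so
+ * LOGIC_SEC_START = SEC_PHY_BLOCK = 8192 sectors.
+ */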
+
+#define REMAIN_SPACE 0
+#define PART_FREE 0x55
+#define PART_DUMMY 0xff
+#define PART_READONLY 0x85
+#define PART_WRITEONLY 0x86
+#define PART_NO_ACCESS 0x87
+
+#define NAND_PART_OP_RO (1 << 0) // read only.
+#define NAND_PART_OP_WO (1 << 1) // write only.
+#define NAND_PART_OP_NA (1 << 2) // not accessible.
+
+//define for compile
+typedef unsigned char UINT8;
+//#define TRUE 1
+#define MALLOC(size) kmalloc(size, GFP_KERNEL)
+#define FREE(d) kfree(d)
+#define MBR_SIZE 1024
+#define MEMCPY(to,from,size) memcpy(to,from,size)
+#define MEMSET(s,c,size) memset(s,c,size)
+
+
+struct tSD_partinfo {
+ unsigned long partsize;
+ unsigned long off_size;
+ unsigned char type;
+};
+
+
+typedef struct __tSD_partition_t
+{
+ int num; //current partition number
+ unsigned long size; // space size.
+ unsigned long offset; //offset in the whole nand device.
+ unsigned int attr; //attribute.
+ struct gendisk * disk; //gendisk to register to system.
+// struct mmc_blk_data *md;
+}tSD_partition_t;
+
+typedef struct __boot_medium_info_t
+{
+ unsigned int medium_type; //0x1 large block nand; 0x0 small block nand.
+ unsigned short pageAddrPerBlock;
+ unsigned short pagePerBlock;
+ unsigned int sec_per_page;
+ unsigned int sec_per_boot_page;
+ unsigned int ecc_bits; //boot ecc config-bchX.
+ unsigned int ud_bytes; //userdata bytes count per ecc unit.
+ unsigned char slc_mode; //share page slc mode?
+ unsigned char tlc_enhance_cmd;
+ unsigned char reserve[2];
+ unsigned int data_blk_per_zone; //
+ unsigned char chipid[64]; //chipid item without Mark.
+ unsigned char lsb_tbl[128]; //usable when the nand needs to use share-page slc mode.
+ unsigned char badblk_tbl[80]; //bad block table.
+} boot_medium_info_t;
+
+typedef struct _boot_op_t
+{
+ unsigned int blk;
+ unsigned int page;
+ unsigned char * buffer;
+}boot_op_t;
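+/*
+ * Example (mirrors the usage in ReadAfinfo()): reading page 5 of physical
+ * block 2 into a page-sized buffer, where "page_buf" stands for any
+ * SEC_PER_PAGE * SEC_SIZE byte buffer:
+ *
+ *   boot_op_t op = { .blk = 2, .page = 5, .buffer = page_buf };
+ *   boot_phy_op_entry(&op, BOOT_READ);
+ */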
+
+
+/*
+ * define for tsd card.
+ */
+struct mmc_blk_data {
+ char *name; //predefined device name.
+ int major; //predefined major device number.
+ int minorbits;
+ int users;
+ const char *subname;
+
+ struct semaphore mutex;
+ struct semaphore thread_sem;
+ spinlock_t *lock;
+ spinlock_t slock;
+ spinlock_t dlock;
+ spinlock_t ulock;
+ struct gendisk *disk;
+ tSD_partition_t *partitions;
+ //tSD_partition_t partitions[MAX_PARTITION];
+#define QUEUE_NUM 3
+ int req_state;
+ struct mmc_queue *queue;
+ struct mmc_queue squeue;
+ struct mmc_queue dqueue;
+ struct mmc_queue uqueue;
+ struct list_head part;
+ struct task_struct *thread;
+
+ unsigned int flags;
+#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
+
+
+ unsigned int usage;
+ unsigned int read_only;
+ unsigned int part_type;
+ unsigned int name_idx;
+ unsigned int reset_done;
+#define MMC_BLK_READ BIT(0)
+#define MMC_BLK_WRITE BIT(1)
+#define MMC_BLK_DISCARD BIT(2)
+#define MMC_BLK_SECDISCARD BIT(3)
+
+ /*
+ * Only set in main mmc_blk_data associated
+ * with mmc_card with mmc_set_drvdata, and keeps
+ * track of the current selected device partition.
+ */
+ unsigned int part_curr;
+ struct device_attribute force_ro;
+ struct device_attribute power_ro_lock;
+ int area_type;
+};
+
+#endif
diff --git a/drivers/mmc/card/tsd_queue.c b/drivers/mmc/card/tsd_queue.c
new file mode 100755
index 0000000..e7adc94
--- /dev/null
+++ b/drivers/mmc/card/tsd_queue.c
@@ -0,0 +1,600 @@
+/*
+ * linux/drivers/mmc/card/queue.c
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "tsd_queue.h"
+#include "tsd_block.h"
+
+
+#define MMC_QUEUE_BOUNCESZ 65536
+
+/*
+ * Prepare a MMC request. This just filters out odd stuff.
+ */
+static int mmc_prep_request(struct request_queue *q, struct request *req)
+{
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+ * We only like normal block requests and discards.
+ */
+ if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
+ blk_dump_rq_flags(req, "MMC bad request");
+ return BLKPREP_KILL;
+ }
+
+ if (mq && mmc_card_removed(mq->card))
+ return BLKPREP_KILL;
+
+ req->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+
+
+
+
+/*
+ * modify for tsd card
+ */
+
+static void tSD_finish_req(struct mmc_blk_data * tSD_device, struct mmc_queue *mq)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_queue_req *tmp;
+ struct request *req= NULL;
+ unsigned int cmd_flags = 0;
+
+ spin_lock_irq(q->queue_lock);
+ //set_current_state(TASK_INTERRUPTIBLE);
+ req = blk_fetch_request(q);
+ mq->mqrq_cur->req = req;
+ spin_unlock_irq(q->queue_lock);
+
+ if (req || mq->mqrq_prev->req) {
+ set_current_state(TASK_RUNNING);
+ tSD_device->req_state = REQ_EXIST;
+ cmd_flags = req ? req->cmd_flags : 0;
+ mq->issue_fn(mq, req);
+ if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ return ; /* fetch again */
+ }
+
+ /*
+ * Current request becomes previous request
+ * and vice versa.
+ * In case of special requests, current request
+ * has been finished. Do not assign it to previous
+ * request.
+ */
+ if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+ mq->mqrq_cur->req = NULL;
+
+ mq->mqrq_prev->brq.mrq.data = NULL;
+ mq->mqrq_prev->req = NULL;
+ tmp = mq->mqrq_prev;
+ mq->mqrq_prev = mq->mqrq_cur;
+ mq->mqrq_cur = tmp;
+ }
+}
+
+int tSD_queue_thread(void *d)
+{
+ unsigned long flags;
+ struct mmc_blk_data *tSD_device = d;
+ struct mmc_queue *mq;
+
+ current->flags |= PF_MEMALLOC | PF_NOFREEZE;
+ //daemonize("tsd-dev-rw");
+
+ task_lock(current);
+ snprintf(current->comm, sizeof(current->comm), "%s", "tsd-dev-rw");
+ task_unlock(current);
+
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+ down(&tSD_device->thread_sem);
+ do{
+ tSD_device->req_state = REQ_NONE;
+//sq_handle:
+ mq = &tSD_device->squeue;
+ tSD_device->lock = &tSD_device->slock;
+ tSD_device->queue = &tSD_device->squeue;
+ tSD_finish_req(tSD_device, mq);
+
+//dq_handle:
+ mq = &tSD_device->dqueue;
+ tSD_device->lock = &tSD_device->dlock;
+ tSD_device->queue = &tSD_device->dqueue;
+ tSD_finish_req(tSD_device, mq);
+
+//uq_handle:
+ mq = &tSD_device->uqueue;
+ tSD_device->lock = &tSD_device->ulock;
+ tSD_device->queue = &tSD_device->uqueue;
+ tSD_finish_req(tSD_device, mq);
+
+ // no more requests in the queues to handle
+ if(REQ_NONE == tSD_device->req_state)
+ {
+ //printk("%s,%d\n",__FUNCTION__,__LINE__);
+ if (kthread_should_stop()) {
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+// printk("%s,%d, -->schedule-->tsd_thread\n",__FUNCTION__,__LINE__);
+ up(&tSD_device->thread_sem);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ //printk(" %s %d",__FUNCTION__,__LINE__);
+ schedule_timeout(HZ);
+ //printk(" %s %d",__FUNCTION__,__LINE__);
+ down(&tSD_device->thread_sem);
+ }
+ }while(1);
+ up(&tSD_device->thread_sem);
+ return 0;
+}
+
+
+
+
+
+
+
+/*
+ * Generic MMC request handler. This is called for any queue on a
+ * particular host. When the host is not busy, we look for a request
+ * on any queue on this host, and attempt to issue it. This may
+ * not be the queue we were asked to process.
+ *
+ * modify for tsd card
+ */
+static void tSD_request(struct request_queue *q)
+{
+ struct mmc_queue *mq = q->queuedata;
+ struct request *req;
+ struct mmc_blk_data *tSD_device;
+
+ if (!mq) {
+ while ((req = blk_fetch_request(q)) != NULL) {
+ req->cmd_flags |= REQ_QUIET;
+ __blk_end_request_all(req, -EIO);
+ }
+ return;
+ }
+
+ tSD_device = mq->data; /* only dereference mq after the NULL check */
+ if(REQ_NONE == tSD_device->req_state){
+ wake_up_process(tSD_device->thread);
+ }
+}
+
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+ struct scatterlist *sg;
+
+ sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ if (!sg)
+ *err = -ENOMEM;
+ else {
+ *err = 0;
+ sg_init_table(sg, sg_len);
+ }
+
+ return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned max_discard;
+
+ max_discard = mmc_calc_max_discard(card);
+ if (!max_discard)
+ return;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ q->limits.max_discard_sectors = max_discard;
+ if (card->erased_byte == 0 && !mmc_can_discard(card))
+ q->limits.discard_zeroes_data = 1;
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = 0;
+ if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
+/**
+ * tSD_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
+ *
+ * Initialise a MMC card request queue.
+ * modify for tsd card
+ */
+int tSD_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
+{
+ struct mmc_host *host = card->host;
+
+ u64 limit = BLK_BOUNCE_HIGH;
+ int ret;
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = *mmc_dev(host)->dma_mask;
+
+ mq->card = card;
+ mq->queue = blk_init_queue(tSD_request, lock);
+ if (!mq->queue)
+ return -ENOMEM;
+
+ mq->mqrq_cur = mqrq_cur;
+ mq->mqrq_prev = mqrq_prev;
+ mq->queue->queuedata = mq;
+
+ blk_queue_prep_rq(mq->queue, mmc_prep_request);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+ if (host->max_segs == 1) {
+ unsigned int bouncesz;
+
+ bouncesz = MMC_QUEUE_BOUNCESZ;
+
+ if (bouncesz > host->max_req_size)
+ bouncesz = host->max_req_size;
+ if (bouncesz > host->max_seg_size)
+ bouncesz = host->max_seg_size;
+ if (bouncesz > (host->max_blk_count * 512))
+ bouncesz = host->max_blk_count * 512;
+
+ if (bouncesz > 512) {
+ mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_cur->bounce_buf) {
+ pr_warning("%s: unable to "
+ "allocate bounce cur buffer\n",
+ mmc_card_name(card));
+ }
+ mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_prev->bounce_buf) {
+ pr_warning("%s: unable to "
+ "allocate bounce prev buffer\n",
+ mmc_card_name(card));
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+ }
+ }
+
+ if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
+ blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+ blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+ blk_queue_max_segments(mq->queue, bouncesz / 512);
+ blk_queue_max_segment_size(mq->queue, bouncesz);
+
+ mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_cur->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ goto cleanup_queue;
+ }
+ }
+#endif
+
+ if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+
+ mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
+ goto cleanup_queue;
+ }
+
+
+
+ return 0;
+ //free_bounce_sg:
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
+ cleanup_queue:
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+
+ blk_cleanup_queue(mq->queue);
+ return ret;
+}
+
+void tSD_cleanup_queue(struct mmc_queue *mq)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+ struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+ struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+
+ /* Make sure the queue isn't suspended, as that will deadlock */
+ mmc_queue_resume(mq);
+
+ ///* Then terminate our worker thread */
+ //kthread_stop(mq->thread);
+
+ /* Empty the queue */
+ spin_lock_irqsave(q->queue_lock, flags);
+ q->queuedata = NULL;
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+
+ mq->card = NULL;
+}
+//EXPORT_SYMBOL(tSD_cleanup_queue);
+
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+ int ret = 0;
+
+
+ mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_cur->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+ mmc_card_name(card));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_prev->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+ mmc_card_name(card));
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&mqrq_cur->packed->list);
+ INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+ return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ kfree(mqrq_prev->packed);
+ mqrq_prev->packed = NULL;
+}
+
+/**
+ * mmc_queue_suspend - suspend a MMC request queue
+ * @mq: MMC queue to suspend
+ *
+ * Stop the block request queue, and wait for our thread to
+ * complete any outstanding requests. This ensures that we
+ * won't suspend while a request is being processed.
+ */
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+#if 0
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+
+ if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
+ mq->flags |= MMC_QUEUE_SUSPENDED;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ down(&mq->thread_sem);
+ }
+#endif
+}
+
+/**
+ * mmc_queue_resume - resume a previously suspended MMC request queue
+ * @mq: MMC queue to resume
+ */
+void mmc_queue_resume(struct mmc_queue *mq)
+{
+#if 0
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+
+ if (mq->flags & MMC_QUEUE_SUSPENDED) {
+ mq->flags &= ~MMC_QUEUE_SUSPENDED;
+
+ up(&mq->thread_sem);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+#endif
+}
+
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+ struct mmc_packed *packed,
+ struct scatterlist *sg,
+ enum mmc_packed_type cmd_type)
+{
+ struct scatterlist *__sg = sg;
+ unsigned int sg_len = 0;
+ struct request *req;
+
+ if (mmc_packed_wr(cmd_type)) {
+ unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+ unsigned int len, remain, offset = 0;
+ u8 *buf = (u8 *)packed->cmd_hdr;
+
+ remain = hdr_sz;
+ do {
+ len = min(remain, max_seg_sz);
+ sg_set_buf(__sg, buf + offset, len);
+ offset += len;
+ remain -= len;
+ (__sg++)->page_link &= ~0x02;
+ sg_len++;
+ } while (remain);
+ }
+
+ list_for_each_entry(req, &packed->list, queuelist) {
+ sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+ __sg = sg + (sg_len - 1);
+ (__sg++)->page_link &= ~0x02;
+ }
+ sg_mark_end(sg + (sg_len - 1));
+ return sg_len;
+}
+
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
+{
+ unsigned int sg_len;
+ size_t buflen;
+ struct scatterlist *sg;
+ enum mmc_packed_type cmd_type;
+ int i;
+
+ cmd_type = mqrq->cmd_type;
+
+ if (!mqrq->bounce_buf) {
+ if (mmc_packed_cmd(cmd_type))
+ return mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->sg, cmd_type);
+ else
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ }
+
+ BUG_ON(!mqrq->bounce_sg);
+
+ if (mmc_packed_cmd(cmd_type))
+ sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->bounce_sg, cmd_type);
+ else
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+
+ mqrq->bounce_sg_len = sg_len;
+
+ buflen = 0;
+ for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
+ buflen += sg->length;
+
+ sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
+
+ return 1;
+}
+
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
+{
+ if (!mqrq->bounce_buf)
+ return;
+
+ if (rq_data_dir(mqrq->req) != WRITE)
+ return;
+
+ sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
+}
+
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
+{
+ if (!mqrq->bounce_buf)
+ return;
+
+ if (rq_data_dir(mqrq->req) != READ)
+ return;
+
+ sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
+}
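+
+/*
+ * Bounce-buffer flow sketch for single-segment hosts:
+ *   mmc_queue_map_sg()      maps the request into bounce_sg and collapses it
+ *                           into the single-entry sg over bounce_buf
+ *   mmc_queue_bounce_pre()  copies write data into bounce_buf beforehand
+ *   (host driver performs the transfer on mqrq->sg)
+ *   mmc_queue_bounce_post() copies read data back out of bounce_buf afterwards
+ */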
diff --git a/drivers/mmc/card/tsd_queue.h b/drivers/mmc/card/tsd_queue.h
new file mode 100755
index 0000000..afec355
--- /dev/null
+++ b/drivers/mmc/card/tsd_queue.h
@@ -0,0 +1,87 @@
+#ifndef TSD_QUEUE_H
+#define TSD_QUEUE_H
+
+#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
+
+
+struct request;
+struct task_struct;
+
+#define REQ_EXIST 1
+#define REQ_NONE 0
+
+
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+enum mmc_packed_type {
+ MMC_PACKED_NONE = 0,
+ MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+ struct list_head list;
+ u32 cmd_hdr[1024];
+ unsigned int blocks;
+ u8 nr_entries;
+ u8 retries;
+ s16 idx_failure;
+};
+
+struct mmc_queue_req {
+ struct request *req;
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+ char *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned int bounce_sg_len;
+ struct mmc_async_req mmc_active;
+ enum mmc_packed_type cmd_type;
+ struct mmc_packed *packed;
+};
+
+struct mmc_queue {
+ struct mmc_card *card;
+ struct task_struct *thread;
+ struct semaphore thread_sem;
+ unsigned int flags;
+#define MMC_QUEUE_SUSPENDED (1 << 0)
+#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+
+ int (*issue_fn)(struct mmc_queue *, struct request *);
+ void *data;
+ struct request_queue *queue;
+ struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq_cur;
+ struct mmc_queue_req *mqrq_prev;
+};
+
+//extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+// const char *);
+extern void tSD_cleanup_queue(struct mmc_queue *);
+
+extern void mmc_queue_suspend(struct mmc_queue *);
+extern void mmc_queue_resume(struct mmc_queue *);
+
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+ struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
+
+
+extern int tSD_queue_thread(void *d);
+extern int tSD_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+ const char *);
+
+#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
old mode 100644
new mode 100755
index 972ff84..ec16cda
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -21,12 +21,16 @@
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <mach/bootdev.h>
#include "core.h"
#include "sdio_cis.h"
#include "bus.h"
+#include "./../host/gl520x_mmc.h"
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
+/* wait up to 3 s for the card to finish init, then go on to the tsd probe */
+#define WAIT_CARD_TIMEOUT 300
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -248,9 +252,11 @@ static void mmc_release_card(struct device *dev)
sdio_free_common_cis(card);
+ if (card->info)
kfree(card->info);
kfree(card);
+ card = NULL;
}
/*
@@ -276,6 +282,77 @@ struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
return card;
}
+
+struct mmc_card *slot0_card = NULL;
+struct mmc_card *slot2_card = NULL;
+
+int owl_set_carddev_match_name(void)
+{
+ int ret = 0;
+ int boot_dev;
+ int timeout = WAIT_CARD_TIMEOUT;
+ boot_dev = owl_get_boot_dev();
+ printk("%s: bootdev 0x%x\n", __FUNCTION__, boot_dev);
+
+ switch (boot_dev) {
+
+ case OWL_BOOTDEV_SD0:
+
+ while((!slot0_card)&&(--timeout)){
+ msleep(10);
+ }
+ if( timeout <= 0){
+ printk("err:%s:slot0_card NULL\n",__FUNCTION__);
+ ret = -1;
+ goto out;
+ }else{
+ printk("%s,sd0 as boot card,sd0 name tsd_card\n", __FUNCTION__);
+ dev_set_name(&slot0_card->dev,"%s","tsd_card");
+ }
+ break;
+
+ case OWL_BOOTDEV_SD2:
+
+ while((!slot2_card)&&(--timeout)){
+ msleep(10);
+ }
+ if( timeout <= 0){
+ printk("err:%s:slot2_card NULL\n",__FUNCTION__);
+ ret = -1;
+ goto out;
+ }else{
+ printk("%s,sd2 as boot card,sd2 name tsd_card\n", __FUNCTION__);
+ dev_set_name(&slot2_card->dev,"%s","tsd_card");
+ }
+
+ break;
+
+ case OWL_BOOTDEV_SD02SD2:
+
+ while((!slot2_card)&&(--timeout)){
+ msleep(10);
+ }
+ if(timeout <= 0){
+ printk("err:%s:slot2_card NULL\n",__FUNCTION__);
+ ret = -1;
+ goto out;
+ }else{
+ printk("%s,sd0 as boot card,sd2 name card_to_card\n", __FUNCTION__);
+ dev_set_name(&slot2_card->dev,"%s","card_to_card");
+ }
+ break;
+
+ default:
+ printk("ERR:%s: bootdev 0x%x\n", __FUNCTION__, boot_dev);
+ ret = -1 ;
+ break;
+
+ }
+out:
+ return ret;
+}
+
+EXPORT_SYMBOL(owl_set_carddev_match_name);
/*
* Register a new MMC card with the driver model.
*/
@@ -284,6 +361,8 @@ int mmc_add_card(struct mmc_card *card)
int ret;
const char *type;
const char *uhs_bus_speed_mode = "";
+ struct gl520xmmc_host *hcd;
+ int boot_dev;
static const char *const uhs_speeds[] = {
[UHS_SDR12_BUS_SPEED] = "SDR12 ",
[UHS_SDR25_BUS_SPEED] = "SDR25 ",
@@ -293,7 +372,39 @@ int mmc_add_card(struct mmc_card *card)
};
- dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
+
+ boot_dev = owl_get_boot_dev();
+ printk("%s: bootdev 0x%x\n", __FUNCTION__, boot_dev);
+
+ hcd = mmc_priv(card->host);
+ if(SDC0_SLOT == hcd->id)
+ {
+ slot0_card = card;
+ //if (boot_dev == OWL_BOOTDEV_NAND || boot_dev == OWL_BOOTDEV_SD2) {
+ printk("force sd0/sd1 host ext-card\n");
+ dev_set_name(&card->dev,"%s","sd_card");
+ //}
+ //else{
+ // dev_set_name(&card->dev, "%s", "sd_boot_card");
+ //}
+
+ }
+ else if(SDC1_SLOT == hcd->id)
+ {
+ dev_set_name(&card->dev,"%s:%04x",mmc_hostname(card->host),card->rca);
+ }
+ else if(SDC2_SLOT == hcd->id)
+ {
+ slot2_card = card;
+ if(boot_dev == OWL_BOOTDEV_SD0){
+ printk("force sd2(emmc) host ext-card\n");
+ dev_set_name(&card->dev, "%s", "emmc");
+ } else{
+ printk("force sd2(emmc) host emmc_boot_card\n");
+ dev_set_name(&card->dev, "%s", "emmc_boot_card");
+ //return 0;
+ }
+ }
switch (card->type) {
case MMC_TYPE_MMC:
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
old mode 100644
new mode 100755
index 06babbe..d2b38bf
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -45,6 +45,7 @@
#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
+#include "../host/gl520x_mmc.h"
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
@@ -2529,6 +2530,8 @@ void mmc_rescan(struct work_struct *work)
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
+ struct gl520xmmc_host *owl_host;
+ owl_host = mmc_priv(host);
if (host->trigger_card_event && host->ops->card_event) {
host->ops->card_event(host);
@@ -2541,6 +2544,7 @@ void mmc_rescan(struct work_struct *work)
/* If there is a non-removable card registered, only scan once */
if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
return;
+
host->rescan_entered = 1;
mmc_bus_get(host);
@@ -2592,10 +2596,28 @@ void mmc_rescan(struct work_struct *work)
mmc_release_host(host);
out:
- if (host->caps & MMC_CAP_NEEDS_POLL)
+ if ((host->caps & MMC_CAP_NEEDS_POLL)
+#ifdef CONFIG_EARLYSUSPEND
+ &&(!(owl_host->mmc_early_suspend))
+#endif
+ ){
mmc_schedule_delayed_work(&host->detect, HZ);
+ }
+}
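+/*
+ * cancel_mmc_work() is currently an empty stub; the polling work is simply
+ * not rescheduled while mmc_early_suspend is set (see mmc_rescan above).
+ */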
+void cancel_mmc_work(struct mmc_host *host )
+{
+
+}
+EXPORT_SYMBOL(cancel_mmc_work);
+
+
+void start_mmc_work(struct mmc_host *host )
+{
+ mmc_schedule_delayed_work(&host->detect, HZ/4);
}
+EXPORT_SYMBOL(start_mmc_work);
+
void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(freqs[0], host->f_min);
@@ -2716,6 +2738,19 @@ int mmc_flush_cache(struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_flush_cache);
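+/*
+ * Power-cycle the card and run the bus resume path to re-initialise it in
+ * place, presumably for the vendor SD recovery paths rather than a full
+ * detect/rescan (callers are outside this file).
+ */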
+int sd_mmc_reinit(struct mmc_host *host)
+{
+ int err = 0;
+ BUG_ON(!(host->bus_ops && !host->bus_dead));
+ mmc_power_off(host);
+ mmc_power_up(host, host->card->ocr);
+ mmc_select_voltage(host, host->card->ocr);
+ BUG_ON(!host->bus_ops->resume);
+ err = host->bus_ops->resume(host);
+ return err;
+}
+EXPORT_SYMBOL(sd_mmc_reinit);
+
#ifdef CONFIG_PM
/* Do the card removal on suspend if card is assumed removeable
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
old mode 100644
new mode 100755
index 7163378..a8865b5
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -17,6 +17,7 @@
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/leds.h>
@@ -26,6 +27,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
+#include <mach/bootdev.h>
#include "core.h"
#include "host.h"
@@ -573,6 +575,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
int err;
struct mmc_host *host;
+ int boot_dev, host_id;
+ const __be32 *addr;
+ struct device_node *dn = dev->of_node;
host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
if (!host)
@@ -581,8 +586,23 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
idr_preload(GFP_KERNEL);
+
spin_lock(&mmc_host_lock);
- err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
+
+
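+ /*
+ * Derive the slot index from the controller's register offset (the SD
+ * controllers sit 0x4000 apart) and reserve idr index 0 (mmc0) for the
+ * controller we booted from, so the boot medium always becomes mmc0.
+ */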
+ addr = of_get_property(dn, "reg", &host_id);
+ if (!addr || (host_id < sizeof(int))) {
+ spin_unlock(&mmc_host_lock);
+ idr_preload_end();
+ kfree(host);
+ return NULL;
+ }
+
+ host_id = (int)((be32_to_cpu(*addr) - 0xB0230000) >> 14);
+ boot_dev = owl_get_boot_dev();
+
+ if(boot_dev > 0 && ((boot_dev - OWL_BOOTDEV_SD0) == host_id)) {
+ err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
+ } else {
+ err = idr_alloc(&mmc_host_idr, host, 1, 0, GFP_NOWAIT);
+ }
+
if (err >= 0)
host->index = err;
spin_unlock(&mmc_host_lock);
@@ -591,6 +611,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
kfree(host);
return NULL;
}
+
+ printk("## host_id: %d boot_dev %x host->index: %d\n", host_id, boot_dev, host->index);
dev_set_name(&host->class_dev, "mmc%d", host->index);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
old mode 100644
new mode 100755
index b61ad22..565e2bd
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -814,6 +814,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
+ /* Work around CMD6 CRC7 errors seen on some cards:
+ * the clock must be raised to 2 MHz before fetching switch information */
+ mmc_set_clock(host, 2000000);
/*
* Fetch switch information from card.
*/
@@ -1193,10 +1196,22 @@ int mmc_attach_sd(struct mmc_host *host)
BUG_ON(!host);
WARN_ON(!host->claimed);
+ /* switch pinmux: keep only CMD and CLK on the SD pads, UART TX/RX for the console */
+ if(host->ops->switch_uart_pinctr && host->ops->switch_uart_pinctr(host)){
+ printk("Err:switch uart pin:%s\n",__FUNCTION__);
+ err = -1;
+ return err;
+ }
err = mmc_send_app_op_cond(host, 0, &ocr);
if (err)
return err;
+ /* switch pinmux back to full SD: CLK, CMD, D0-D3 */
+ if(host->ops->switch_sd_pinctr && host->ops->switch_sd_pinctr(host)){
+ printk("Err:switch sd pin:%s\n",__FUNCTION__);
+ err = -1;
+ return err;
+ }
mmc_attach_bus(host, &mmc_sd_ops);
if (host->ocr_avail_sd)
@@ -1244,6 +1259,13 @@ remove_card:
host->card = NULL;
mmc_claim_host(host);
err:
+
+ /* switch pinmux back: CMD/CLK only with UART TX/RX restored.
+ * Do not return here; the bus must still be detached below. */
+ if(host->ops->switch_uart_pinctr && host->ops->switch_uart_pinctr(host))
+ printk("Err:switch uart pin:%s\n",__FUNCTION__);
mmc_detach_bus(host);
pr_err("%s: error %d whilst initialising SD card\n",
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index e05851c..96891e6 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1161,3 +1161,41 @@ err:
return err;
}
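+/*
+ * Re-initialise an SDIO card in place without tearing the host down;
+ * presumably intended for Wi-Fi drivers that need to recover the card
+ * after a chip reset (callers are outside this file).
+ */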
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+
+ mmc_claim_host(host);
+
+ mmc_go_idle(host);
+ mmc_set_timing(card->host, 0);
+
+ mmc_set_clock(host, host->f_min);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+
+ host->card->ocr = mmc_select_voltage(host, ocr);
+ if (!host->card->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+ if (mmc_host_uhs(host))
+ host->card->ocr |= R4_18V_PRESENT;
+
+ err = mmc_sdio_init_card(host, host->card->ocr, card, 0);
+ if (err)
+ goto err;
+
+ mmc_release_host(host);
+ return 0;
+err:
+ printk("%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 78cb4d5..c0a2e15 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -319,6 +319,9 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
blocks = remainder / func->cur_blksize;
if (blocks > max_blocks)
blocks = max_blocks;
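+ /* For transfers larger than 512 bytes, round the block-mode length
+ * down to a 512-byte boundary, presumably to match the host
+ * controller's 512-byte data buffer. */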
+ if (remainder > 512)
+ blocks = (remainder & ~((1<<9)-1)) /
+ func->cur_blksize;
size = blocks * func->cur_blksize;
ret = mmc_io_rw_extended(func->card, write,
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
old mode 100644
new mode 100755
index b1f837e..33419fc
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -482,7 +482,16 @@ config MMC_SPI
controllers.
If unsure, or if your system has no SPI master driver, say N.
+config MMC_OWL
+ tristate " Actions MMC "
+ help
+ Say Y here if you have MMC_OWL
+ connected to your system.
+
+ If unsure, say N.
+ To compile this driver as a module, choose M here: the
+ module will be called CONFIG_MMC_OWL.
config MMC_S3C
tristate "Samsung S3C SD/MMC Card Interface support"
depends on ARCH_S3C24XX
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
old mode 100644
new mode 100755
index 2bf04ff..d5b990d
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -74,6 +74,8 @@ obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
+obj-$(CONFIG_MMC_OWL) += act_mmc_host.o
+act_mmc_host-y := wlan_device.o wlan_driver.o gl520x_mmc.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
endif
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
old mode 100644
new mode 100755
index f45ab91..49da32a
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -98,7 +98,7 @@
#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
#define SDMMC_TMOUT_RESP_MSK 0xFF
-/* card-type register defines */
+/* card_type register defines */
#define SDMMC_CTYPE_8BIT BIT(16)
#define SDMMC_CTYPE_4BIT BIT(0)
#define SDMMC_CTYPE_1BIT 0
diff --git a/drivers/mmc/host/gl520x_dma.h b/drivers/mmc/host/gl520x_dma.h
new file mode 100755
index 0000000..7e065d1
--- /dev/null
+++ b/drivers/mmc/host/gl520x_dma.h
@@ -0,0 +1,143 @@
+#ifndef _ATV5203_DMA_H_
+#define _ATV5203_DMA_H_
+
+/*
+ * Simple DMA transfer operations defines for MMC/SD card
+ */
+#define DMA_MODE_OFFSET 0x0000
+#define DMA_SOURCE_OFFSET 0x0004
+#define DMA_DESTINATION_OFFSET 0x0008
+#define DMA_FRAME_LEN_OFFSET 0x000C
+#define DMA_FRAME_CNT_OFFSET 0x0010
+#define DMA_REMAIN_FRAME_CNT_OFFSET 0x0014
+#define DMA_REMAIN_CNT_OFFSET 0x0018
+#define DMA_SOURCE_STRIDE_OFFSET 0x001C
+#define DMA_DESTINATION_STRIDE_OFFSET 0x0020
+#define DMA_START_OFFSET 0x0024
+#define DMA_ACP_ATTRIBUTE_OFFSET 0x0028
+#define DMA_CHAINED_CTL_OFFSET 0x002C
+#define DMA_CONSTANT_OFFSET 0x0030
+#define DMA_LINKLIST_CTL_OFFSET 0x0034
+#define DMA_NEXT_DESCRIPTOR_OFFSET 0x0038
+#define DMA_CURRENT_DESCRIPTOR_NUM_OFFSET 0x003C
+#define DMA_INT_CTL_OFFSET 0x0040
+#define DMA_INT_STATUS_OFFSET 0x0044
+#define DMA_CURRENT_SOURCE_POINTER_OFFSET 0x0048
+#define DMA_CURRENT_DESTINATION_POINTER_OFFSET 0x004C
+
+/* Bit defines */
+#define DMA_FRAME_LEN_MASK 0xFFFFF
+#define DMA_FRAME_CNT_MASK 0xFFF
+
+#define DMA_START_DPE (0X1 << 31)
+#define DMA_START_DSE (0X1 << 0)
+
+static unsigned int dma_base[] = {
+ DMA0_BASE, DMA1_BASE, DMA2_BASE, DMA3_BASE, DMA4_BASE,
+ DMA5_BASE, DMA6_BASE, DMA7_BASE, DMA8_BASE, DMA9_BASE,
+ DMA10_BASE, DMA11_BASE
+};
+
+/**
+ * set_dma_mode() - set the dma transfer mode
+ * @dmanr: dma channel number
+ * @mode: dma transfer mode
+ */
+static __inline__ void set_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+ act_writel(mode, dma_base[dmanr] + DMA_MODE_OFFSET);
+}
+
+/**
+ * get_dma_mode() - get the dma transfer mode
+ * @dmanr: dma channel number
+ */
+static __inline__ unsigned int get_dma_mode(unsigned int dmanr)
+{
+ return act_readl(dma_base[dmanr] + DMA_MODE_OFFSET);
+}
+
+/**
+ * set_dma_src_addr() - set the dma transfer source address
+ * @dmanr: dma channel number
+ * @src: source address
+ */
+static __inline__ void set_dma_src_addr(unsigned int dmanr, unsigned int src)
+{
+ act_writel(src, dma_base[dmanr] + DMA_SOURCE_OFFSET);
+}
+
+/**
+ * set_dma_dst_addr() - set the dma transfer destination address
+ * @dmanr: dma channel number
+ * @dst: destination address
+ */
+static __inline__ void set_dma_dst_addr(unsigned int dmanr, unsigned int dst)
+{
+ act_writel(dst, dma_base[dmanr] + DMA_DESTINATION_OFFSET);
+}
+
+/**
+ * set_dma_frame_len() - set the dma transfer frame length
+ * @dmanr: dma channel number
+ * @len: dma transfer frame length
+ */
+static __inline__ void set_dma_frame_len(unsigned int dmanr, unsigned int len)
+{
+ len &= DMA_FRAME_LEN_MASK;
+ act_writel(len, dma_base[dmanr] + DMA_FRAME_LEN_OFFSET);
+}
+
+/**
+ * set_dma_frame_count() - set the dma transfer frame number
+ * @dmanr: dma channel number
+ * @cnt: dma transfer frame count
+ */
+static __inline__ void set_dma_frame_count(unsigned int dmanr, unsigned int cnt)
+{
+ cnt &= DMA_FRAME_CNT_MASK;
+ act_writel(cnt, dma_base[dmanr] + DMA_FRAME_CNT_OFFSET);
+}
+
+/**
+ * start_dma() - start the dma transfer
+ * @dmanr: dma channel number
+ */
+static __inline__ void start_dma(unsigned int dmanr)
+{
+ act_writel(DMA_START_DSE, dma_base[dmanr] + DMA_START_OFFSET);
+}
+
+/**
+ * dma_started() - test if the dma channel started
+ * @dmanr: dma channel number
+ */
+static __inline__ int dma_started(unsigned int dmanr)
+{
+ return (act_readl(dma_base[dmanr] + DMA_START_OFFSET) & DMA_START_DSE) ? 1 : 0;
+}
+
+/**
+ * pause_dma() - pause the dma transfer
+ * @dmanr: dma channel number
+ */
+static __inline__ void pause_dma(unsigned int dmanr)
+{
+ act_writel(DMA_START_DPE, dma_base[dmanr] + DMA_START_OFFSET);
+}
+
+/**
+ * stop_dma() - stop the dma transfer
+ * @dmanr: dma channel number
+ */
+static __inline__ void stop_dma(unsigned int dmanr)
+{
+ act_writel(act_readl(dma_base[dmanr] + DMA_START_OFFSET) & (~DMA_START_DSE),
+ dma_base[dmanr] + DMA_START_OFFSET);
+}
+
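+/*
+ * Illustrative usage sketch (not called from this header): program a channel
+ * for a single SD read and start it. "chan", "buf_phys" and "nr_blocks" are
+ * placeholder names; the mode value comes from the SoC DMA documentation
+ * (here the SDC0-read mode used elsewhere in this driver).
+ *
+ * set_dma_mode(chan, ATV520X_SDC0RD_DMAMODE);
+ * set_dma_src_addr(chan, HOST_DAT_DMA(host));
+ * set_dma_dst_addr(chan, buf_phys);
+ * set_dma_frame_len(chan, 512);
+ * set_dma_frame_count(chan, nr_blocks);
+ * start_dma(chan);
+ * while (dma_started(chan))
+ * cpu_relax();
+ */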
+#endif /* end of _ATV5203_DMA_H_ */
diff --git a/drivers/mmc/host/gl520x_mmc.c b/drivers/mmc/host/gl520x_mmc.c
new file mode 100755
index 0000000..ca66a1f
--- /dev/null
+++ b/drivers/mmc/host/gl520x_mmc.c
@@ -0,0 +1,2149 @@
+/*
+ * gl520x_mmc.c - GL5203 SD/MMC driver
+ *
+ * Copyright (C) 2012, Actions Semiconductor Co. LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/cpufreq.h>
+#include <linux/genhd.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/clk.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/hdmac-owl.h>
+#include <mach/bootdev.h>
+
+#include <linux/mfd/atc260x/atc260x.h>
+#include "gl520x_mmc.h"
+#include "wlan_plat_data.h"
+#include "wlan_device.h"
+
+static int act_check_trs_date_status(
+ struct gl520xmmc_host *host , struct mmc_request *mrq);
+static void act_dump_reg(struct gl520xmmc_host *host);
+
+#undef pr_debug
+/* #define ACTS_MMC_DEBUG */
+
+#ifdef ACTS_MMC_DEBUG
+#define pr_debug(format, arg...) \
+ printk(KERN_INFO format, ##arg)
+#else
+#define pr_debug(format, args...)
+#endif
+//#define DEBUG_EN 1
+#ifdef DEBUG_EN
+#define DEBUG(format, arg...) \
+ printk(KERN_INFO format, ##arg)
+#else
+#define DEBUG(format, args...) \
+ do {} while (0)
+#endif
+
+/*
+ * SD Controller Linked Card type, one of those:
+ * MMC_CARD_DISABLE | MMC_CARD_MEMORY | MMC_CARD_WIFI
+ */
+static const char * const card_types[] = {
+ [MMC_CARD_DISABLE] = "none",
+ [MMC_CARD_MEMORY] = "memory",
+ [MMC_CARD_EMMC] = "emmc",
+ [MMC_CARD_WIFI] = "wifi",
+};
+
+
+/*
+ * Method to detect card Insert/Extract:
+ */
+static const char * const card_detect_modes[] = {
+ [SIRQ_DETECT_CARD] = "sirq",
+ [GPIO_DETECT_CARD] = "gpio",
+ [COMMAND_DETECT_CARD] = "command",
+};
+
+
+static int detect_use_sirq = 0 ;
+static int detect_use_gpio = 0 ;
+
+extern int acts_wlan_set_power(struct wlan_plat_data *pdata, int on,unsigned long msec);
+extern struct wlan_plat_data *acts_get_wlan_plat_data(void);
+
+
+static void acts_dump_mfp(struct gl520xmmc_host *host)
+{
+ void __iomem *reg_mfp;
+ void __iomem *reg_cmu;
+
+ reg_mfp = ioremap(GPIO_MFP_PWM_BASE,INTC_GPIOE_TYPE-GPIO_MFP_PWM_BASE+4);
+ reg_cmu = ioremap(CMU_BASE,CMU_AUDIOPLL_ETHPLLDEBUG-CMU_BASE+4);
+ if (!reg_cmu || !reg_mfp) {
+ printk("ERR:%s:ioremap failed: reg_cmu=%p, reg_mfp=%p\n",
+ __FUNCTION__, reg_cmu, reg_mfp);
+ if (reg_cmu)
+ iounmap(reg_cmu);
+ if (reg_mfp)
+ iounmap(reg_mfp);
+ return;
+ }
+
+ pr_err("\tGPIO_CINEN:0x%x\n", readl(DUMP_GPIO_CINEN(reg_mfp)));
+ pr_err("\tGPIO_COUTEN:0x%x\n", readl(DUMP_GPIO_COUTEN(reg_mfp)));
+
+ pr_err("\tMFP_CTL0:0x%x\n", readl(DUMP_MFP_CTL0(reg_mfp)));
+ pr_err("\tMFP_CTL1:0x%x\n", readl(DUMP_MFP_CTL1(reg_mfp)));
+ pr_err("\tMFP_CTL2:0x%x\n", readl(DUMP_MFP_CTL2(reg_mfp)));
+ pr_err("\tMFP_CTL3:0x%x\n", readl(DUMP_MFP_CTL3(reg_mfp)));
+ pr_err("\tPAD_PULLCTL0:0x%x\n", readl(DUMP_PAD_PULLCTL0(reg_mfp)));
+ pr_err("\tPAD_PULLCTL1:0x%x\n", readl(DUMP_PAD_PULLCTL1(reg_mfp)));
+ pr_err("\tPAD_PULLCTL2:0x%x\n", readl(DUMP_PAD_PULLCTL2(reg_mfp)));
+
+ pr_err("\tPAD_DRV0:0x%x\n", readl(DUMP_PAD_DVR0(reg_mfp)));
+ pr_err("\tPAD_DRV1:0x%x\n", readl(DUMP_PAD_DVR1(reg_mfp)));
+ pr_err("\tPAD_DRV2:0x%x\n", readl(DUMP_PAD_DVR2(reg_mfp)));
+
+ pr_err("\tCMU_DEVCLKEN0:0x%x\n", readl(DUMP_CMU_DEVCLKEN0(reg_cmu)));
+ pr_err("\tCMU_DEVCLKEN1:0x%x\n", readl(DUMP_CMU_DEVCLKEN1(reg_cmu)));
+
+ pr_err("\tCMU_DEVPLL:0x%x\n", readl(DUMP_CMU_DEVPLL(reg_cmu)));
+ pr_err("\tCMU_NANDPLL:0x%x\n", readl(DUMP_CMU_NANDPLL(reg_cmu)));
+
+ pr_err("\tCMU_SD0CLK:0x%x\n", readl(DUMP_CMU_CMU_SD0CLK(reg_cmu)));
+ pr_err("\tCMU_SD1CLK:0x%x\n", readl(DUMP_CMU_CMU_SD1CLK(reg_cmu)));
+ pr_err("\tCMU_SD2CLK:0x%x\n", readl(DUMP_CMU_CMU_SD2CLK(reg_cmu)));
+ iounmap(reg_mfp);
+ iounmap(reg_cmu);
+}
+
+static void acts_dump_sdc(struct gl520xmmc_host *host)
+{
+ pr_err("\n\tSD_EN:0x%x\n", readl(HOST_EN(host)));
+ pr_err("\tSD_CTL:0x%x\n", readl(HOST_CTL(host)));
+ pr_err("\tSD_STATE:0x%x\n", readl(HOST_STATE(host)));
+ pr_err("\tSD_CMD:0x%x\n", readl(HOST_CMD(host)));
+ pr_err("\tSD_ARG:0x%x\n", readl(HOST_ARG(host)));
+ pr_err("\tSD_RSPBUF0:0x%x\n", readl(HOST_RSPBUF0(host)));
+ pr_err("\tSD_RSPBUF1:0x%x\n", readl(HOST_RSPBUF1(host)));
+ pr_err("\tSD_RSPBUF2:0x%x\n", readl(HOST_RSPBUF2(host)));
+ pr_err("\tSD_RSPBUF3:0x%x\n", readl(HOST_RSPBUF3(host)));
+ pr_err("\tSD_RSPBUF4:0x%x\n", readl(HOST_RSPBUF4(host)));
+ pr_err("\tSD_DAT:0x%x\n", readl(HOST_DAT(host)));
+ pr_err("\tSD_BLK_SIZE:0x%x\n\n", readl(HOST_BLK_SIZE(host)));
+ pr_err("\tSD_BLK_NUM:0x%x\n", readl(HOST_BLK_NUM(host)));
+ pr_err("\tSD_BUF_SIZE:0x%x\n", readl(HOST_BUF_SIZE(host)));
+}
+
+static void acts_dump_dmac(struct gl520xmmc_host *host)
+{
+ owl_dma_dump_all(host->dma);
+}
+
+static inline int acts_enable_clock(struct gl520xmmc_host *host)
+{
+ int ret;
+
+ if (!host->clk_on) {
+ ret = module_clk_enable(host->module_id);
+ if (ret) {
+ pr_err("SDC[%d] enable module clock error\n",
+ host->id);
+ return ret;
+ }
+ host->clk_on = 1;
+ }
+ return 0;
+}
+
+static inline void acts_disable_clock(struct gl520xmmc_host *host)
+{
+ if (host->clk_on) {
+ module_clk_disable(host->module_id);
+ host->clk_on = 0;
+ }
+}
+
+static int acts_mmc_send_init_clk(struct gl520xmmc_host *host)
+{
+ u32 mode;
+ int ret = 0;
+
+ init_completion(&host->sdc_complete);
+ mode = SD_CTL_TS | SD_CTL_TCN(5) | SD_CTL_TM(8);
+ mode |= (readl(HOST_CTL(host)) & (0xff << 16));
+
+ writel(mode, HOST_CTL(host));
+ DEBUG("host%d: send acts_mmc_send_init_clk \n",host->id );
+
+ if (!wait_for_completion_timeout(&host->sdc_complete, HZ)) {
+ pr_err("*SDC%d send 80 init clock timeout error\n", host->id);
+ act_dump_reg(host);
+ ret = -1;
+ }
+ DEBUG("host%d: acts_mmc_send_init_clk OK\n",host->id);
+ return ret;
+}
+
+static void acts_mmc_set_clk(struct gl520xmmc_host *host, int rate)
+{
+
+
+ if (0 == rate) {
+ pr_err("SDC%d set clock error\n", host->id);
+ return;
+
+ }
+
+ /*
+ * Set the RDELAY and WDELAY based on the sd clk.
+ */
+ if (rate <= 1000000) {
+
+ writel((readl(HOST_CTL(host)) & (~(0xff << 16))) |
+ SD_CTL_RDELAY(host->rdelay.delay_lowclk) |
+ SD_CTL_WDELAY(host->wdelay.delay_lowclk),
+ HOST_CTL(host));
+
+ } else if ((rate > 1000000) && (rate <= 26000000)) {
+ writel((readl(HOST_CTL(host)) & (~(0xff << 16))) |
+ SD_CTL_RDELAY(host->rdelay.delay_midclk) |
+ SD_CTL_WDELAY(host->wdelay.delay_midclk),
+ HOST_CTL(host));
+
+ } else if ((rate > 26000000) && (rate <= 52000000)) {
+
+ writel((readl(HOST_CTL(host)) & (~(0xff << 16))) |
+ SD_CTL_RDELAY(host->rdelay.delay_highclk) |
+ SD_CTL_WDELAY(host->wdelay.delay_highclk),
+ HOST_CTL(host));
+
+ } else if ((rate > 52000000) && (rate <= 100000000)) {
+
+ writel((readl(HOST_CTL(host)) & (~(0xff << 16))) |
+ SD_CTL_RDELAY(6) |
+ SD_CTL_WDELAY(4),
+ HOST_CTL(host));
+
+ } else {
+ pr_err("SD3.0 max clock should not > 100Mhz\n");
+
+ }
+
+ host->read_delay_chain = (readl(HOST_CTL(host)) & (0xf << 20))>>20;
+ host->write_delay_chain = (readl(HOST_CTL(host)) & (0xf << 16))>>16;
+ host->write_delay_chain_bak = host->write_delay_chain;
+ host->read_delay_chain_bak = host->read_delay_chain;
+ module_clk_set_rate(host, rate);
+
+}
+
+static void act_mmc_opt_regulator(struct gl520xmmc_host *host,bool enable)
+{
+ int ret = 0;
+ if (!(IS_ERR(host->reg)|| NULL==host->reg)) {
+ if(enable){
+ ret = regulator_enable(host->reg);
+ if(ret){
+ printk("host%d regulator_enable fail\n",host->id);
+ }
+ }else{
+ ret = regulator_disable(host->reg);
+ if(ret){
+ printk("host%d regulator_disable fail\n",host->id);
+ }
+ }
+
+ }
+}
+
+static void acts_mmc_power_up(struct gl520xmmc_host *host)
+{
+ /* enable gl5302 power for card */
+
+
+ /* power on reset */
+ module_reset(host->module_id);
+ acts_enable_clock(host);
+
+ writel(SD_ENABLE | SD_EN_RESE, HOST_EN(host));
+}
+
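+/*
+ * On boards where the SD0 pads are shared with the console UART, only CMD
+ * and CLK stay muxed to the SD controller while no card is being accessed;
+ * owl_switch_uart_pinctr()/owl_switch_sd_pinctr() flip the pinmux between
+ * the two users and are called from mmc_attach_sd() around card detection.
+ */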
+static int owl_switch_uart_pinctr(struct mmc_host *mmc)
+{
+
+ int ret = 0;
+ struct gl520xmmc_host *host;
+ host = mmc_priv(mmc);
+
+ if(mmc_card_expected_mem(host->type_expected)&&\
+ (host->sdio_uart_supported)){
+
+ if(host->switch_pin_flag == SD_PIN){
+ /* release the SD pin group */
+ if (!IS_ERR(host->pcl)){
+ pinctrl_put(host->pcl);
+ }
+ /* request the UART pin group */
+ host->pcl = pinctrl_get_select(mmc->parent, PINCTRL_UART_PIN);
+ if (IS_ERR(host->pcl)){
+ pr_err("SDC%d get misc uart pinctrl failed, %ld\n",
+ host->id, PTR_ERR(host->pcl));
+ ret = (int)PTR_ERR(host->pcl);
+ host->switch_pin_flag = ERR_PIN ;
+ goto out;
+ }
+ }else if(host->switch_pin_flag == UART_PIN){
+ goto out;
+ }else{
+ printk("err:owl_switch_uart_pinctr: unexpected pin state\n");
+ host->switch_pin_flag = ERR_PIN;
+ ret = -1;
+ goto out;
+ }
+
+ host->switch_pin_flag = UART_PIN;
+
+ }
+
+out:
+ return ret;
+}
+
+static int owl_switch_sd_pinctr(struct mmc_host *mmc)
+{
+
+ int ret = 0;
+ struct gl520xmmc_host *host;
+ host = mmc_priv(mmc);
+
+ if(mmc_card_expected_mem(host->type_expected)&&\
+ (host->sdio_uart_supported)){
+ if(host->switch_pin_flag == UART_PIN){
+ /* release the UART pin group */
+ if (!IS_ERR(host->pcl)){
+ pinctrl_put(host->pcl);
+ }
+ /* request the SD pin group */
+ host->pcl = pinctrl_get_select_default(mmc->parent);
+ if (IS_ERR(host->pcl)){
+ pr_err("SDC%d get sd pinctrl failed, %ld\n",
+ host->id, PTR_ERR(host->pcl));
+ ret = (int)PTR_ERR(host->pcl);
+ host->switch_pin_flag = ERR_PIN ;
+ goto out;
+ }
+ }else if(host->switch_pin_flag == SD_PIN){
+ goto out;
+ }else{
+ printk("err:owl_switch_sd_pinctr: unexpected pin state\n");
+ host->switch_pin_flag = ERR_PIN;
+ ret = -1;
+ goto out;
+ }
+
+ host->switch_pin_flag = SD_PIN;
+
+ }
+
+out:
+ return ret;
+}
+static int acts_mmc_request_pinctrl(struct gl520xmmc_host *host)
+{
+
+ static int req_pinctr_num = 0;
+ /* host0 used for an SD card need not request pins here; they are requested when the card is rescanned */
+ if(mmc_card_expected_mem(host->type_expected)&&\
+ (host->sdio_uart_supported))
+ return 0;
+
+ if (host->sdio_uart_supported)
+ sdio_uart_pinctrl_free();
+
+ host->pcl = pinctrl_get_select_default(host->mmc->parent);
+
+ if (IS_ERR(host->pcl)) {
+ pr_err("SDC%d get default pinctrl failed, %ld\n",
+ host->id, PTR_ERR(host->pcl));
+ return (int)PTR_ERR(host->pcl);
+ }
+
+ /* The first pinctrl request is made by the platform code, so SD2
+ * does not need to request it again the first time around.
+ */
+ if( (SDC2_SLOT == host->id)&&( !req_pinctr_num ) ){
+
+ if (IS_ERR(host->pcl) ||(NULL==host->pcl)) {
+ printk("SDC%d get default pinctrl failed, %ld\n",
+ host->id, PTR_ERR(host->pcl));
+
+ return (int)PTR_ERR(host->pcl);
+ }
+ pinctrl_put(host->pcl);
+ req_pinctr_num = 1;
+ printk("host%d,platfrom request already\n",host->id);
+ }
+
+ return 0;
+}
+
+static int acts_mmc_free_pinctrl(struct gl520xmmc_host *host)
+{
+
+ int ret;
+ /* host0 used for an SD card need not free pins here; they are freed when the rescan fails */
+ if(mmc_card_expected_mem(host->type_expected)&&\
+ (host->sdio_uart_supported))
+ return 0;
+
+ if (host->pcl == NULL) {
+ pr_err("SDC%d pinctrl has not initialed\n", host->id);
+ return 0;
+ }
+
+ if (!IS_ERR(host->pcl)) {
+ pinctrl_put(host->pcl);
+ }
+
+ if (host->sdio_uart_supported) {
+ ret = sdio_uart_pinctrl_request();
+ if (ret < 0) {
+ pr_err("SDC%d uart pinctrl request failed\n",
+ host->id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void acts_mmc_power_on(struct gl520xmmc_host *host)
+{
+ mutex_lock(&host->pin_mutex);
+ if (acts_mmc_request_pinctrl(host) < 0)
+ pr_err("SDC%d request pinctrl failed\n", host->id);
+ mutex_unlock(&host->pin_mutex);
+
+ /* clock is provided to eliminate power-up synchronization problems */
+ /* enable the command irq */
+ writel(readl(HOST_STATE(host)) | SD_STATE_TEIE,
+ HOST_STATE(host));
+
+ /* module function enable */
+ if (mmc_card_expected_wifi(host->type_expected)) {
+ writel(readl(HOST_EN(host)) | SD_EN_SDIOEN,
+ HOST_EN(host));
+ }
+
+ acts_mmc_send_init_clk(host);
+}
+
+static void acts_mmc_power_off(struct gl520xmmc_host *host)
+{
+ mutex_lock(&host->pin_mutex);
+ if (acts_mmc_free_pinctrl(host) < 0)
+ pr_err("SDC%d free pinctrl failed\n", host->id);
+ mutex_unlock(&host->pin_mutex);
+}
+
+static void acts_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct gl520xmmc_host *host = mmc_priv(mmc);
+ u32 ctrl_reg;
+
+
+
+ if (ios->power_mode != host->power_state) {
+ host->power_state = ios->power_mode;
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ pr_debug("\tMMC_POWER_UP\n");
+ acts_mmc_power_up(host);
+ break;
+ case MMC_POWER_ON:
+ pr_debug("\tMMC_POWER_ON\n");
+ acts_mmc_power_on(host);
+ break;
+ case MMC_POWER_OFF:
+ pr_debug("\tMMC_POWER_OFF\n");
+ acts_mmc_power_off(host);
+ break;
+ default:
+ pr_err("Power mode not supported\n");
+ }
+ }
+
+ if (ios->clock && ios->clock != host->clock) {
+ host->clock = ios->clock;
+ pr_debug("\tSet clock: %d\n", host->clock);
+ acts_mmc_set_clk(host, ios->clock);
+ }
+
+ ctrl_reg = readl(HOST_EN(host));
+ {
+ host->bus_width = ios->bus_width;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl_reg &= ~0x3;
+ ctrl_reg |= 0x2;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl_reg &= ~0x3;
+ ctrl_reg |= 0x1;
+ break;
+ case MMC_BUS_WIDTH_1:
+ ctrl_reg &= ~0x3;
+ break;
+ }
+ }
+
+ if (ios->chip_select != host->chip_select) {
+ host->chip_select = ios->chip_select;
+ switch (ios->chip_select) {
+ case MMC_CS_DONTCARE:
+ break;
+ case MMC_CS_HIGH:
+ ctrl_reg &= ~0x3;
+ ctrl_reg |= 0x1;
+ break;
+ case MMC_CS_LOW:
+ ctrl_reg &= ~0x3;
+ break;
+ }
+ }
+
+ if(ios->timing != host->timing) {
+ host->timing = ios->timing;
+ if (ios->timing == MMC_TIMING_UHS_DDR50){
+ ctrl_reg |= (1 << 2);
+ }
+ }
+
+ switch (ctrl_reg & 0x3) {
+ case 0x2:
+ pr_debug("\tMMC_BUS_WIDTH_8\n");
+ break;
+ case 0x1:
+ pr_debug("\tMMC_BUS_WIDTH_4\n");
+ break;
+ case 0x0:
+ pr_debug("\tMMC_BUS_WIDTH_1\n");
+ break;
+ default:
+ pr_debug("\tMMC_BUS_WIDTH NOT known\n");
+ }
+
+ writel(ctrl_reg, HOST_EN(host));
+}
+
+static void acts_mmc_gpio_check_status(unsigned long data)
+{
+ struct gl520xmmc_host *host = (struct gl520xmmc_host *)data;
+ int card_present;
+
+ if (host->card_detect_reverse)
+ card_present = !!(gpio_get_value(host->detect_pin));
+ else
+ card_present = !(gpio_get_value(host->detect_pin));
+
+ if (card_present ^ host->present) {
+ /* debouncer */
+ mdelay(20);
+
+ if (host->card_detect_reverse)
+ card_present = !!(gpio_get_value(host->detect_pin));
+ else
+ card_present = !(gpio_get_value(host->detect_pin));
+
+ if (card_present ^ host->present) {
+ pr_info("%s: Slot status change detected (%d -> %d)\n",
+ mmc_hostname(host->mmc), host->present,
+ card_present);
+
+ mmc_detect_change(host->mmc, 0);
+
+ host->present = card_present;
+ }
+ }
+ mod_timer(&host->timer, jiffies + HZ/5);
+}
+
+static void acts_mmc_detect_irq_enable(struct gl520xmmc_host *host)
+{
+
+}
+
+static irqreturn_t acts_mmc_detect_irq_handler(int irq, void *devid)
+{
+ return IRQ_NONE;
+}
+
+
+static void acts_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct gl520xmmc_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&host->lock, flags);
+ state = readl(HOST_STATE(host));
+ if (enable) {
+ state |= SD_STATE_SDIOA_EN;
+ /* default SDIOA, protect irq throw away */
+ state &= ~SD_STATE_SDIOA_P;
+ state &= ~SD_STATE_TEI;
+ } else {
+ state |= SD_STATE_SDIOA_P;
+ state &= ~SD_STATE_SDIOA_EN;
+ state &= ~SD_STATE_TEI;
+ }
+
+ writel(state, HOST_STATE(host));
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static irqreturn_t acts_sdc_irq_handler(int irq, void *devid)
+{
+ struct gl520xmmc_host *host = devid;
+ struct mmc_host *mmc = host->mmc;
+ unsigned long flags;
+ u32 state;
+ u32 temp;
+
+ spin_lock_irqsave(&host->lock, flags);
+ state = readl(HOST_STATE(host));
+ /* check cmd irq */
+ if (state & SD_STATE_TEI) {
+ temp = readl(HOST_STATE(host));
+ temp = temp &(~SD_STATE_SDIOA_P);
+ temp |= SD_STATE_TEI;
+ writel(temp,HOST_STATE(host));
+ complete(&host->sdc_complete);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ /* check the SDIO DAT0 irq */
+ if(mmc->caps & MMC_CAP_SDIO_IRQ){
+ if ((state & SD_STATE_SDIOA_P) && (state & SD_STATE_SDIOA_EN)) {
+ DEBUG("wifihost%d:%s %d \n",host->id ,__FUNCTION__,__LINE__);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int _config_read_dma_mode(unsigned int sdc_id)
+{
+ unsigned int dma_mode = -1;
+
+ switch (sdc_id) {
+ case SDC0_SLOT:
+ dma_mode = ATV520X_SDC0RD_DMAMODE;
+ break;
+ case SDC1_SLOT:
+ dma_mode = ATV520X_SDC1RD_DMAMODE;
+ break;
+ case SDC2_SLOT:
+ dma_mode = ATV520X_SDC2RD_DMAMODE;
+ break;
+ default:
+ pr_err("error: MMC/SD slot %d not support\n", sdc_id);
+ return -1;
+ }
+
+ return dma_mode;
+}
+
+static unsigned int _config_write_dma_mode(unsigned int sdc_id)
+{
+ unsigned int dma_mode;
+
+ switch (sdc_id) {
+ case SDC0_SLOT:
+ dma_mode = ATV520X_SDC0WT_DMAMODE;
+ break;
+ case SDC1_SLOT:
+ dma_mode = ATV520X_SDC1WT_DMAMODE;
+ break;
+ case SDC2_SLOT:
+ dma_mode = ATV520X_SDC2WT_DMAMODE;
+ break;
+ default:
+ pr_err("error: MMC/SD slot %d not support\n", sdc_id);
+ return -1;
+ }
+
+ return dma_mode;
+}
+
+
+/*
+ * Defer mmc_add_host() to a workqueue to shorten module init time during
+ * kernel boot; alternatively mmc_add_host() could be called directly here.
+ */
+static void mmc_host_add_work(struct work_struct *work)
+{
+
+ struct delayed_work *phost_add_work = to_delayed_work(work);
+ struct gl520xmmc_host *host =container_of(phost_add_work, \
+ struct gl520xmmc_host,host_add_work);
+ mmc_add_host(host->mmc);
+}
+
+
+static void acts_mmc_finish_request(struct gl520xmmc_host *host)
+{
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+
+ WARN_ON(!host->mrq);
+
+ mrq = host->mrq;
+ host->mrq = NULL;
+
+
+ if (mrq->data) {
+ data = mrq->data;
+
+ /* Finally finished */
+ dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
+ host->dma_dir);
+ }
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+/*
+ * Since send_command can be called from data_complete, it must not
+ * "finish the request" itself.
+ * acts_mmc_send_command() may sleep!
+ */
+
+static int acts_mmc_send_command(struct gl520xmmc_host *host,
+ struct mmc_command *cmd, struct mmc_data *data)
+{
+ u32 mode;
+ u32 rsp[2];
+ unsigned int cmd_rsp_mask = 0;
+ u32 status ;
+
+ cmd->error = 0;
+
+ init_completion(&host->sdc_complete);
+
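+ /*
+ * Map the expected response type to a controller transfer mode:
+ * TM(0) no response, TM(1) R1/R3, TM(2) R2, TM(3) R1b, and
+ * TM(4)/TM(5) for R1 commands with read/write data (see below).
+ */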
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ mode = SD_CTL_TM(0);
+ break;
+
+ case MMC_RSP_R1:
+ if (data) {
+ if (data->flags & MMC_DATA_READ)
+ mode = SD_CTL_TM(4);
+ else
+ mode = SD_CTL_TM(5);
+ } else {
+ mode = SD_CTL_TM(1);
+ }
+ cmd_rsp_mask = SD_STATE_CLNR
+ |SD_STATE_CRC7ER;
+
+ break;
+
+ case MMC_RSP_R1B:
+ mode = SD_CTL_TM(3);
+ cmd_rsp_mask = SD_STATE_CLNR
+ |SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R2:
+ mode = SD_CTL_TM(2);
+ cmd_rsp_mask = SD_STATE_CLNR
+ |SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R3:
+ mode = SD_CTL_TM(1);
+ cmd_rsp_mask = SD_STATE_CLNR ;
+ break;
+
+ default:
+ pr_err("no math command RSP flag %x\n", cmd->flags);
+ cmd->error = -1;
+ return MMC_CMD_COMPLETE;
+ }
+
+
+ /* keep current RDELAY & WDELAY value */
+ mode |= (readl(HOST_CTL(host)) & (0xff << 16));
+
+ /* start to send corresponding command type */
+ writel(cmd->arg, HOST_ARG(host));
+ writel(cmd->opcode, HOST_CMD(host));
+ DEBUG("host%d Transfer mode:0x%x\n\tArg:0x%x\n\tCmd:%u\n",
+ host->id,mode, cmd->arg, cmd->opcode);
+
+
+ /* set LBE so the controller keeps driving the clock after busy */
+ if(data){
+ mode |= (SD_CTL_TS |SD_CTL_LBE| 0xE4000000);
+ }else{
+ /* pure command: disable the hardware timeout and SD_CTL_LBE */
+ mode &= ~(SD_CTL_TOUTEN|SD_CTL_LBE);
+ mode |= SD_CTL_TS ;
+ }
+
+ /* start transfer */
+ writel(mode, HOST_CTL(host));
+
+ pr_debug("SDC%d send CMD%d, SD_CTL=0x%x\n", host->id,
+ cmd->opcode, readl(HOST_CTL(host)));
+
+ /* data command: return now; completion is handled by the request path */
+ if(data){
+ return DATA_CMD;
+ }
+
+ /*
+ * Wait for the command to finish.
+ * Some bad cards do need more time to complete
+ * data transmission and programming.
+ */
+ if (!wait_for_completion_timeout(&host->sdc_complete, 30*HZ)) {
+ pr_err("!!!host%d:cmd wait ts interrupt timeout\n",host->id);
+ cmd->error = CMD_TS_TIMEOUT;
+ act_dump_reg(host);
+ goto out;
+ }
+
+ DEBUG("host%d: wait cmd sdc_complete OK\n",host->id);
+
+ status = readl(HOST_STATE(host));
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd_rsp_mask & status ){
+ if ( status & SD_STATE_CLNR ) {
+ cmd->error = CMD_RSP_ERR;
+ #if 0
+ pr_err("SDC%d send CMD%d: CMD_NO_RSP...\n",
+ host->id, cmd->opcode);
+ #endif
+ goto out;
+ }
+
+ if ( status & SD_STATE_CRC7ER) {
+
+ cmd->error = -EILSEQ;
+
+ host->read_delay_chain --;
+ if(host->read_delay_chain < 0){
+ host->read_delay_chain = 0xf;
+ }
+
+ writel((readl(HOST_CTL(host)) & (~(0xff << 16))) |
+ SD_CTL_RDELAY(host->read_delay_chain) |
+ SD_CTL_WDELAY(host->write_delay_chain),
+ HOST_CTL(host));
+
+ printk("cmd:try read delay chain:%d\n",
+ host->read_delay_chain);
+
+ pr_err("cmd:SDC%d send CMD%d, CMD_RSP_CRC_ERR\n",
+ host->id, cmd->opcode);
+
+ goto out;
+ }
+
+ }
+
+ if (cmd->flags & MMC_RSP_136) {
+ /*TODO: MSB first */
+ cmd->resp[3] = readl(HOST_RSPBUF0(host));
+ cmd->resp[2] = readl(HOST_RSPBUF1(host));
+ cmd->resp[1] = readl(HOST_RSPBUF2(host));
+ cmd->resp[0] = readl(HOST_RSPBUF3(host));
+ } else {
+ rsp[0] = readl(HOST_RSPBUF0(host));
+ rsp[1] = readl(HOST_RSPBUF1(host));
+ cmd->resp[0] = rsp[1] << 24 | rsp[0] >> 8;
+ cmd->resp[1] = rsp[1] >> 8;
+ }
+ }
+
+out:
+ return PURE_CMD;
+
+}
+
+static void acts_mmc_dma_complete(void *dma_async_param)
+{
+ struct gl520xmmc_host *host = (struct gl520xmmc_host *)dma_async_param;
+ BUG_ON( !host->mrq->data);
+
+ if(host->dma_terminate == true){
+ host->dma_terminate = false;
+ printk("%s:return for dmaengine_terminate_all\n",__FUNCTION__);
+ return ;
+ }
+
+ if( host->mrq->data ){
+ complete(&host->dma_complete);
+ host->dmaflag = true;
+ }
+}
+
+
+
+static int acts_mmc_prepare_data(struct gl520xmmc_host *host,
+ struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ enum dma_transfer_direction slave_dirn;
+ int i, sglen;
+ unsigned total;
+
+ host->dmaflag = false;
+ host->dma_terminate = false ;
+
+ writel(readl(HOST_EN(host)) | SD_EN_BSEL, HOST_EN(host));
+
+ writel(data->blocks, HOST_BLK_NUM(host));
+ writel(data->blksz, HOST_BLK_SIZE(host));
+
+ total = data->blksz * data->blocks;
+
+ if (total < 512)
+ writel(total , HOST_BUF_SIZE(host));
+ else
+ writel(512, HOST_BUF_SIZE(host));
+
+ /*
+ * We don't do DMA on "complex" transfers, i.e. with
+ * non-word-aligned buffers or lengths.
+ */
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ if (sg->offset & 3 || sg->length & 3)
+ pr_err("SD tag: non-word-aligned buffers or lengths.\n");
+ }
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dma_dir = DMA_FROM_DEVICE;
+ host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+ } else if (data->flags & MMC_DATA_WRITE) {
+ host->dma_dir = DMA_TO_DEVICE;
+ host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+ } else {
+ BUG_ON(1);
+ }
+
+ sglen = dma_map_sg(host->dma->device->dev, data->sg,
+ data->sg_len, host->dma_dir);
+
+ host->dma_slave.dma_dev = host->dma->device->dev;
+ host->dma_slave.trans_type = SLAVE;
+ if (data->flags & MMC_DATA_READ)
+ host->dma_slave.mode = _config_read_dma_mode(host->id);
+ else
+ host->dma_slave.mode = _config_write_dma_mode(host->id);
+
+ host->dma->private = (void *)&host->dma_slave;
+ if (dmaengine_slave_config(host->dma, &host->dma_conf))
+ pr_err("Failed to config DMA channel\n");
+
+ host->desc = dmaengine_prep_slave_sg(host->dma,
+ data->sg, sglen, slave_dirn,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!host->desc) {
+ pr_err("dmaengine_prep_slave_sg() fail\n");
+ return -EBUSY;
+ }
+
+ host->desc->callback = acts_mmc_dma_complete;
+ host->desc->callback_param = host;
+ /*
+ *init for adjust delay chain
+ */
+ data->error = 0;
+
+
+ return 0;
+}
+
+static int acts_mmc_card_exist(struct mmc_host *mmc)
+{
+ struct gl520xmmc_host *host = mmc_priv(mmc);
+ int present;
+
+ if (mmc_card_expected_mem(host->type_expected)) {
+ if (detect_use_gpio) {
+ if (host->card_detect_reverse)
+ present = !!(gpio_get_value(host->detect_pin));
+ else
+ present = !(gpio_get_value(host->detect_pin));
+
+ return present;
+ }
+ }
+
+ if (mmc_card_expected_wifi(host->type_expected))
+ return host->sdio_present;
+
+ return -ENOSYS;
+}
+
+static void acts_mmc_err_reset(struct gl520xmmc_host *host)
+{
+ u32 reg_en, reg_ctr, reg_state;
+
+ reg_en = readl(HOST_EN(host));
+ reg_ctr = readl(HOST_CTL(host));
+ reg_state = readl(HOST_STATE(host));
+
+ module_reset(host->module_id);
+
+ writel(SD_ENABLE, HOST_EN(host));
+ writel(reg_en, HOST_EN(host));
+ reg_ctr &=~SD_CTL_TS;
+ writel(reg_ctr, HOST_CTL(host));
+ writel(reg_state, HOST_STATE(host));
+}
+
+#define DEBUG_ERR 0
+
+static void acts_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct gl520xmmc_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ /* bail out early if the card has been removed */
+ if ((acts_mmc_card_exist(mmc) == 0)&&\
+ (!(mmc->caps&MMC_CAP_NONREMOVABLE))){
+ mrq->cmd->error = -ENOMEDIUM;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ /*
+ * the pointer being not NULL means we are making request on sd/mmc,
+ * which will be reset to NULL in finish_request.
+ */
+
+#if DEBUG_ERR
+ if(mrq->data){
+ if(time++ >300 ){
+ time = 0;
+
+ if(mrq->data->flags & MMC_DATA_READ){
+ host->read_delay_chain = 0xd;
+ pr_err("set error read_delay_chain:0x%x\n",host->read_delay_chain );
+ }else {
+ host->write_delay_chain = 0xd;
+ pr_err("set error write_delay_chain:0x%x\n",host->write_delay_chain );
+
+ }
+
+ writel((readl(HOST_CTL(host)) & (~(0xff << 16))) |
+ SD_CTL_RDELAY(host->read_delay_chain) |
+ SD_CTL_WDELAY(host->write_delay_chain),
+ HOST_CTL(host));
+
+ }
+ }
+#endif
+
+ host->mrq = mrq;
+
+ if (mrq->data) {
+ ret = acts_mmc_prepare_data(host, mrq->data);
+ if (ret != 0) {
+ pr_err("SD DMA transfer: prepare data error\n");
+ mrq->data->error = ret;
+ acts_mmc_finish_request(host);
+ return;
+ } else {
+ init_completion(&host->dma_complete);
+ dmaengine_submit(host->desc);
+ dma_async_issue_pending(host->dma);
+ }
+ }
+
+ ret = acts_mmc_send_command(host, mrq->cmd, mrq->data);
+
+ if( ret == DATA_CMD){
+
+ if (!wait_for_completion_timeout(&host->sdc_complete, 10*HZ)) {
+ pr_err("!!!host%d:wait date transfer ts intrupt timeout\n",host->id);
+ }
+
+ DEBUG("host%d: wait date sdc_complete OK\n",host->id);
+
+ if(act_check_trs_date_status(host , mrq)){
+ pr_err("!!!host%d err:act_check_trs_date_status\n",host->id);
+ act_dump_reg(host);
+ pr_err("Entry SD/MMC module error reset\n");
+
+ host->dma_terminate = true ;
+ dmaengine_terminate_all(host->dma);
+
+ acts_mmc_err_reset(host);
+ pr_err("Exit SD/MMC module error reset\n");
+ goto finish;
+ }
+
+ if (!wait_for_completion_timeout(&host->dma_complete, 5*HZ)) {
+ pr_err("!!!host%d:dma transfer completion timeout\n",host->id);
+
+ pr_err("!!!host%d:dmaflag:%d\n",host->id ,host->dmaflag);
+ mrq->data->error = CMD_DATA_TIMEOUT;
+ mrq->cmd->error = -ETIMEDOUT;
+ act_dump_reg(host);
+ pr_err("Entry SD/MMC module error reset\n");
+ host->dma_terminate = true ;
+ dmaengine_terminate_all(host->dma);
+ acts_mmc_err_reset(host);
+ pr_err("Exit SD/MMC module error reset\n");
+ goto finish;
+
+ }
+
+ DEBUG("host%d: wait date dma_complete OK\n",host->id);
+
+ if (mrq->data->stop){
+ /* send stop cmd */
+ acts_mmc_send_command(host, mrq->data->stop, NULL);
+ if(mrq->data->stop->error){
+ act_dump_reg(host);
+ pr_err("Entry SD/MMC module error reset\n");
+ acts_mmc_err_reset(host);
+ pr_err("Exit SD/MMC module error reset\n");
+ goto finish;
+ }
+ }
+
+ mrq->data->bytes_xfered = mrq->data->blocks *
+ mrq->data->blksz;
+
+ }
+
+finish:
+ acts_mmc_finish_request(host);
+}
+
+static void act_dump_reg(struct gl520xmmc_host *host)
+{
+ acts_dump_mfp(host);
+ acts_dump_sdc(host);
+ acts_dump_dmac(host);
+}
+
+
+/* check whether the status register reports a transfer error */
+static int act_check_trs_date_status(
+ struct gl520xmmc_host *host , struct mmc_request *mrq)
+{
+
+ struct mmc_data *data = mrq->data;
+ struct mmc_command *cmd = mrq->cmd;
+ u32 status =readl(HOST_STATE(host)) ;
+ u32 check_status = 0;
+ u32 cmd_rsp_mask = 0;
+
+ if (!host || !host->mrq || !host->mrq->data) {
+ pr_err("SDC%d when DMA finish, request is NULL\n",
+ host->id);
+ return -EINVAL;
+ }
+
+ cmd_rsp_mask = SD_STATE_TOUTE
+ |SD_STATE_CLNR
+ |SD_STATE_WC16ER
+ |SD_STATE_RC16ER
+ |SD_STATE_CRC7ER;
+
+ check_status = status &cmd_rsp_mask;
+
+ if(check_status){
+ if(check_status & SD_STATE_TOUTE){
+ pr_err("data:card HW timeout error\n");
+ data->error = HW_TIMEOUT;
+ goto out;
+
+ }
+ if(check_status & SD_STATE_CLNR){
+ pr_err("data:card cmd line no respond error\n");
+ data->error = CMD_RSP_ERR;
+ goto out;
+ }
+ if(check_status & SD_STATE_WC16ER){
+
+ pr_err("data:card write:crc error\n");
+ data->error = DATA_WR_CRC_ERR;
+ cmd->error = -EILSEQ;
+ goto out;
+ }
+ if(check_status & SD_STATE_RC16ER){
+ pr_err("data:card read:crc error\n");
+ data->error = DATA_RD_CRC_ERR;
+ cmd->error = -EILSEQ;
+ goto out;
+ }
+ if(check_status & SD_STATE_CRC7ER){
+ pr_err("data: cmd CMD_RSP_CRC_ERR\n");
+ data->error = CMD_RSP_CRC_ERR;
+ cmd->error = -EILSEQ;
+ goto out;
+ }
+ }
+
+out:
+ if((data->error == DATA_RD_CRC_ERR) ||
+ (data->error == CMD_RSP_CRC_ERR)){
+ host->read_delay_chain --;
+ if(host->read_delay_chain < 0){
+ host->read_delay_chain = 0xf;
+ }
+
+ printk("try read delay chain:%d\n",
+ host->read_delay_chain);
+
+ }else if(data->error == DATA_WR_CRC_ERR){
+
+ host->write_delay_chain --;
+ if(host->write_delay_chain < 0){
+ host->write_delay_chain = 0xf;
+ }
+
+ printk("try write delay chain:%d\n",
+ host->write_delay_chain);
+ }
+
+ if(data->error == DATA_WR_CRC_ERR||
+ data->error == DATA_RD_CRC_ERR){
+ writel((readl(HOST_CTL(host)) & (~(0xff << 16))) |
+ SD_CTL_RDELAY(host->read_delay_chain) |
+ SD_CTL_WDELAY(host->write_delay_chain),
+ HOST_CTL(host));
+ }
+
+ return (data->error);
+}
+
+static int acts_mmc_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct gl520xmmc_host *host = mmc_priv(mmc);
+ u32 state;
+ int i = 0;
+ int ret = 0;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ goto out;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+
+ /* stop send clk until 5ms later */
+ mdelay(5);
+
+ /* switch host voltage to 1.8v and delay 10ms */
+ writel(readl(HOST_EN(host)) | SD_EN_S18EN,
+ HOST_EN(host));
+ mdelay(10);
+
+ /* send continuous clock */
+ writel(readl(HOST_CTL(host)) | SD_CTL_SCC,
+ HOST_CTL(host));
+ for (i = 0; i < 100; i++) {
+ state = readl(HOST_STATE(host));
+ if ((state & SD_STATE_CMDS) && (state & SD_STATE_DAT0S))
+ break;
+ udelay(100);
+ }
+ if (i >= 100) { /* max 10ms */
+ pr_err("SDC%d level error, voltage switch failed\n",
+ host->id);
+ ret = -EINVAL;
+ }
+
+ /* stop to send continuous clock */
+ writel(readl(HOST_CTL(host)) & ~SD_CTL_SCC,
+ HOST_CTL(host));
+ }
+
+out:
+
+ return ret;
+}
+
+static int acts_mmc_busy(struct mmc_host *mmc)
+{
+ unsigned int state;
+ unsigned int i = 0;
+ struct gl520xmmc_host *host = mmc_priv(mmc);
+ state = readl(HOST_STATE(host));
+ while ((state & SD_STATE_CMDS) || (state & SD_STATE_DAT0S)) {
+
+ if (i >= 100) { /* max 10ms */
+ pr_err("%s:SDC%d level error, CMD11 send failed\n",
+ __FUNCTION__,host->id);
+ return 0;
+ }
+ i++;
+ udelay(100);
+ state = readl(HOST_STATE(host));
+ }
+ return 1; //not busy
+}
+
+static int acts_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct gl520xmmc_host *host = mmc_priv(mmc);
+ int status = -ENOSYS;
+ int read_only = -ENOSYS;
+
+ if (gpio_is_valid(host->wpswitch_gpio)) {
+ status = gpio_request(host->wpswitch_gpio,
+ "wp_switch");
+ if (status) {
+ pr_err("%s: %s: Failed to request GPIO %d\n",
+ mmc_hostname(mmc), __func__,
+ host->wpswitch_gpio);
+ } else {
+ status = gpio_direction_input(host->wpswitch_gpio);
+ if (!status) {
+ /*
+ * Wait for at least 300ms as debounce
+ * time for GPIO input to stabilize.
+ */
+ msleep(300);
+
+ /*
+ * SD card write protect switch on, high level.
+ */
+ read_only = gpio_get_value_cansleep(
+ host->wpswitch_gpio);
+
+ }
+ gpio_free(host->wpswitch_gpio);
+ }
+ }
+
+ pr_info("%s: Card read-only status %d\n", __func__, read_only);
+
+ return read_only;
+}
+
+static const struct mmc_host_ops acts_mmc_ops = {
+ .get_cd = acts_mmc_card_exist,
+ .request = acts_mmc_request,
+ .set_ios = acts_mmc_set_ios,
+ .enable_sdio_irq = acts_mmc_enable_sdio_irq,
+ .start_signal_voltage_switch = acts_mmc_signal_voltage_switch,
+ .card_busy = acts_mmc_busy,
+ .get_ro = acts_mmc_get_ro,
+ .switch_sd_pinctr = owl_switch_sd_pinctr,
+ .switch_uart_pinctr = owl_switch_uart_pinctr,
+};
+
+static int acts_mmc_clkfreq_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct clk_notifier_data *clk_nd = data;
+ unsigned long old_rate, new_rate;
+ struct gl520xmmc_host *host;
+
+ host = container_of(nb, struct gl520xmmc_host, nblock);
+ old_rate = clk_nd->old_rate;
+ new_rate = clk_nd->new_rate;
+
+ if (action == PRE_RATE_CHANGE) {
+ /* pause host dma transfer */
+ } else if (action == POST_RATE_CHANGE) {
+ /* acts_mmc_set_clk(host, new_rate); */
+ /* resume to start dma transfer */
+ }
+
+ return NOTIFY_OK;
+}
+
+static inline void acts_mmc_sdc_config(struct gl520xmmc_host *host)
+{
+ if (host->start & 0x4000) {
+ /* host->start holds the controller's physical base (res->start) */
+ host->id = SDC1_SLOT;
+ host->module_id = MOD_ID_SD1;
+ host->pad_drv = SDC1_PAD_DRV;
+ host->wdelay.delay_lowclk = SDC1_WDELAY_LOW_CLK;
+ host->wdelay.delay_midclk = SDC1_WDELAY_MID_CLK;
+ host->wdelay.delay_highclk = SDC1_WDELAY_HIGH_CLK;
+ host->rdelay.delay_lowclk = SDC1_RDELAY_LOW_CLK;
+ host->rdelay.delay_midclk = SDC1_RDELAY_MID_CLK;
+ host->rdelay.delay_highclk = SDC1_RDELAY_HIGH_CLK;
+ } else if (host->start& 0x8000) {
+ host->id = SDC2_SLOT;
+ host->module_id = MOD_ID_SD2;
+ host->pad_drv = SDC2_PAD_DRV;
+ host->wdelay.delay_lowclk = SDC2_WDELAY_LOW_CLK;
+ host->wdelay.delay_midclk = SDC2_WDELAY_MID_CLK;
+ host->wdelay.delay_highclk = SDC2_WDELAY_HIGH_CLK;
+ host->rdelay.delay_lowclk = SDC2_RDELAY_LOW_CLK;
+ host->rdelay.delay_midclk = SDC2_RDELAY_MID_CLK;
+ host->rdelay.delay_highclk = SDC2_RDELAY_HIGH_CLK;
+ } else {
+ host->id = SDC0_SLOT;
+ host->module_id = MOD_ID_SD0;
+ host->pad_drv = SDC0_PAD_DRV;
+ host->wdelay.delay_lowclk = SDC0_WDELAY_LOW_CLK;
+ host->wdelay.delay_midclk = SDC0_WDELAY_MID_CLK;
+ host->wdelay.delay_highclk = SDC0_WDELAY_HIGH_CLK;
+ host->rdelay.delay_lowclk = SDC0_RDELAY_LOW_CLK;
+ host->rdelay.delay_midclk = SDC0_RDELAY_MID_CLK;
+ host->rdelay.delay_highclk = SDC0_RDELAY_HIGH_CLK;
+ }
+}
+
+int of_get_card_detect_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "card_detect_mode", &pm);
+ if (err < 0)
+ return err;
+ for (i = 0; i < ARRAY_SIZE(card_detect_modes); i++)
+ if (!strcasecmp(pm, card_detect_modes[i]))
+ return i;
+ pr_err("error: please chose card detect method\n");
+ return -ENODEV;
+}
+
+int of_get_card_type(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "card_type", &pm);
+ if (err < 0)
+ return err;
+ for (i = 0; i < ARRAY_SIZE(card_types); i++)
+ if (!strcasecmp(pm, card_types[i]))
+ return i;
+ pr_err("error: please make sure card type is exist\n");
+ return -ENODEV;
+}
+
+static int acts_mmc_get_power(struct gl520xmmc_host *host,
+ struct device_node *np)
+{
+ const char *pm;
+ int err;
+
+ if (of_find_property(np, "sd_vcc", NULL)) {
+ err = of_property_read_string(np, "sd_vcc", &pm);
+ if (err < 0) {
+ pr_err("SDC[%u] can not read SD_VCC power source\n",
+ host->id);
+ return -1;
+ }
+
+ host->reg = regulator_get(NULL, pm);
+ if (IS_ERR(host->reg)) {
+ pr_err("SDC[%u] failed to get regulator %s\n",
+ host->id, "sd_vcc");
+ return -1;
+ }
+
+ act_mmc_opt_regulator(host,REG_ENABLE);
+ }
+
+ return 0;
+}
+
+static int alloc_mmc_add_host_workqueue(struct gl520xmmc_host *host)
+{
+
+ char mmc_add_host_wq_name[OWL_MMC_WORK_QUEUE_NAME]={0};
+
+ snprintf (mmc_add_host_wq_name, OWL_MMC_WORK_QUEUE_NAME,\
+ "host_add_work%d",host->id);
+
+ host->add_host_wq = alloc_workqueue(mmc_add_host_wq_name,
+ WQ_MEM_RECLAIM, 1);
+
+ if(NULL == host->add_host_wq){
+ printk("%s:alloc mmc_host_add workqueue fail\n",__FUNCTION__);
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&(host->host_add_work), mmc_host_add_work);
+
+ return 0;
+
+}
+void cancel_mmc_work(struct mmc_host *host );
+void start_mmc_work(struct mmc_host *host );
+
+#ifdef CONFIG_EARLYSUSPEND
+
+static void mmc_early_suspend(struct early_suspend *handler)
+{
+
+ struct gl520xmmc_host*host =NULL;
+ struct mmc_host *mmc;
+
+ host = container_of(handler,struct gl520xmmc_host,mmc_es_handler);
+ mmc = host->mmc ;
+ if((mmc_card_expected_mem(host->type_expected))&&\
+ (mmc->caps& MMC_CAP_NEEDS_POLL)){
+ host->mmc_early_suspend = 1;
+ printk("hostid:%d,mmc_early_suspend:host->mmc_early_suspend=%d\n",host->id,host->mmc_early_suspend);
+ }
+
+}
+
+static void mmc_late_resume(struct early_suspend *handler)
+{
+ struct gl520xmmc_host*host =NULL;
+ struct mmc_host *mmc;
+
+ host = container_of(handler,struct gl520xmmc_host,mmc_es_handler);
+ mmc = host->mmc ;
+
+ if((mmc_card_expected_mem(host->type_expected)&&\
+ (mmc->caps& MMC_CAP_NEEDS_POLL))){
+ host->mmc_early_suspend = 0;
+ start_mmc_work(mmc);
+ printk("hostid:%d,mmc_late_resume:host->mmc_early_suspend=%d\n",host->id,host->mmc_early_suspend);
+ }
+}
+#endif
+
+static int owl_upgrade_flag = OWL_NORMAL_BOOT;
+
+static int __init owl_check_upgrade(char *__unused)
+{
+ owl_upgrade_flag = OWL_UPGRADE;
+ printk("%s:owl_upgrade_flag is OWL_UPGRADE\n",__FUNCTION__);
+ return 0 ;
+}
+
+__setup("owl_upgrade", owl_check_upgrade);
+
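+/*
+ * Queue the deferred mmc_add_host() work for this slot. SD and Wi-Fi slots
+ * are always scanned; the eMMC slot is skipped when booting from NAND (or
+ * SD-to-NAND) unless the "owl_upgrade" boot flag is set.
+ */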
+static int owl_mmc_resan(struct gl520xmmc_host * host,
+ unsigned long delay)
+{
+ int bootdev;
+
+ bootdev = owl_get_boot_dev();
+ if(mmc_card_expected_mem(host->type_expected)||
+ mmc_card_expected_wifi(host->type_expected) ){
+ printk("host%d: sure rescan mmc\n",host->id);
+ queue_delayed_work(host->add_host_wq, &host->host_add_work, delay);
+ }else if(mmc_card_expected_emmc(host->type_expected)){
+ if((owl_upgrade_flag == OWL_UPGRADE)||((bootdev !=OWL_BOOTDEV_NAND)&&\
+ (bootdev !=OWL_BOOTDEV_SD02NAND))){
+ queue_delayed_work(host->add_host_wq, &host->host_add_work, delay);
+ }else{
+ host->pcl = pinctrl_get_select_default(host->mmc->parent);
+ if (IS_ERR(host->pcl)) {
+ pr_err("%s:SDC%d get default pinctrl failed, %ld\n",
+ __FUNCTION__,host->id, PTR_ERR(host->pcl));
+ return (int)PTR_ERR(host->pcl);
+ }
+ pinctrl_put(host->pcl);
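+ /* pcl is put twice here, presumably to also drop the reference taken
+ * by the platform init code for SD2 (see acts_mmc_request_pinctrl). */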
+ pinctrl_put(host->pcl);
+ printk("host%d:there is no need to resan emmc\n",host->id);
+ }
+ }else{
+ printk("!!!!!error: mmc type is error\n");
+ return -1;
+
+ }
+ return 0;
+}
+
+/*
+ * Only when SD0 is used for an SD card do we keep the UART TX/RX pins valid
+ * together with SD0 CLK/CMD, so that both the SD-card rescan and the UART
+ * console keep working.
+ */
+static int owl_requeset_share_uart_sd0_pinctr(struct mmc_host *mmc)
+{
+ struct gl520xmmc_host *host;
+ host = mmc_priv(mmc);
+
+ if((host->sdio_uart_supported)&&\
+ (mmc_card_expected_mem(host->type_expected))){
+
+ sdio_uart_pinctrl_free();
+ host->pcl = pinctrl_get_select(mmc->parent, PINCTRL_UART_PIN);
+ if (IS_ERR(host->pcl)) {
+ pr_err("SDC%d get misc uart pinctrl failed, %ld\n",
+ host->id, PTR_ERR(host->pcl));
+ host->switch_pin_flag = ERR_PIN;
+ return (int)PTR_ERR(host->pcl);
+ }
+ host->switch_pin_flag = UART_PIN ;
+
+ }
+ return 0;
+}
+static int __init acts_mmc_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct gl520xmmc_host *host;
+ struct resource *res;
+ dma_cap_mask_t mask;
+ struct device_node *dn = pdev->dev.of_node;
+ struct wlan_plat_data *pdata;
+ int ret = 0;
+ mmc = mmc_alloc_host(sizeof(struct gl520xmmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "require memory for mmc_host failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host = mmc_priv(mmc);
+ spin_lock_init(&host->lock);
+ mutex_init(&host->pin_mutex);
+ host->mmc = mmc;
+ host->power_state = host->bus_width = host->chip_select = -1;
+ host->clock = 0;
+ host->mrq = NULL;
+ host->switch_pin_flag = ERR_PIN; /* initial pin state */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource\n");
+ ret = -ENODEV;
+ goto out_free_host;
+ }
+
+ if (!request_mem_region(res->start,
+ resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "Unable to request register region\n");
+ ret = -EBUSY;
+ goto out_free_host;
+ }
+
+ host->iobase = ioremap(res->start, resource_size(res));
+ if (host->iobase == NULL) {
+ dev_err(&pdev->dev, "Unable to ioremap register region\n");
+ release_mem_region(res->start, resource_size(res));
+ ret = -ENXIO;
+ goto out_free_host;
+ }
+
+
+ host->start = res->start;
+
+
+ host->type_expected = of_get_card_type(dn);
+ if (host->type_expected < 0) {
+ ret = host->type_expected;
+ goto out_free_host;
+ }
+
+ acts_mmc_sdc_config(host);
+
+ if(alloc_mmc_add_host_workqueue(host)){
+ pr_info("SDC%d request dma queue fail\n", host->id);
+ goto err_dma_workqueue;
+ }
+
+ ret = module_clk_get(host);
+ if (ret < 0)
+ goto err_add_host_workqueue;
+
+ memset(&host->nblock, 0, sizeof(host->nblock));
+ host->nblock.notifier_call = acts_mmc_clkfreq_notify;
+ /* clk_notifier_register(host->clk, &host->nblock); */
+
+ ret = acts_mmc_get_power(host, dn);
+ if (ret < 0)
+ goto out_put_clk;
+
+ if (of_find_property(dn, "sdio_uart_supported", NULL)) {
+ host->sdio_uart_supported = 1;
+ pr_info("SDC%d use sdio uart conversion\n", host->id);
+ }
+
+ if (of_find_property(dn, "card_detect_reverse", NULL)) {
+ host->card_detect_reverse = 1;
+ pr_info("SDC%d detect sd card use reverse power-level\n",
+ host->id);
+ }
+
+ /* MT5931 SDIO WiFi needs a continuous clock */
+ if (mmc_card_expected_wifi(host->type_expected)) {
+ if (of_find_property(dn, "send_continuous_clock", NULL)) {
+ host->send_continuous_clock = 1;
+ pr_info("SDC%d wifi send continuous clock\n", host->id);
+ }
+ }
+
+ /* Request DMA channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma = dma_request_channel(mask, NULL, NULL);
+ if (!host->dma) {
+ dev_err(&pdev->dev, "Failed to request DMA channel\n");
+ ret = -ENODEV;
+ goto out_put_regulator;
+ }
+
+ dev_info(&pdev->dev, "using %s for DMA transfers\n",
+ dma_chan_name(host->dma));
+
+ host->dma_conf.src_addr = HOST_DAT_DMA(host);
+ host->dma_conf.dst_addr = HOST_DAT_DMA(host);
+ host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.device_fc = false;
+
+ mmc->ops = &acts_mmc_ops;
+
+ mmc->f_min = 100000;
+ if(SDC0_SLOT == host->id)
+ /* 5206 SD0 runs at 50 MHz max */
+ mmc->f_max = 50000000;
+ else
+ mmc->f_max = 100000000;
+ mmc->max_seg_size = 256 * 512;
+ mmc->max_segs = 128;
+ mmc->max_req_size = 512 * 256;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 256;
+
+ mmc->ocr_avail = ACTS_MMC_OCR;
+ mmc->caps = MMC_CAP_NEEDS_POLL | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA;
+
+ mmc->caps2 = MMC_CAP2_BOOTPART_NOACC;
+
+ if (of_find_property(dn, "one_bit_width", NULL))
+ mmc->caps &= ~MMC_CAP_4_BIT_DATA;
+
+ if (mmc_card_expected_emmc(host->type_expected))
+ mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50|MMC_CAP_8_BIT_DATA;
+ /* eMMC and SD cards support erase (discard, trim, secure discard) */
+ if(mmc_card_expected_emmc(host->type_expected)||\
+ mmc_card_expected_mem(host->type_expected)){
+ mmc->caps |= MMC_CAP_ERASE;
+ }
+
+ /* SD3.0 support */
+ if (SDC0_SLOT == host->id) {
+ mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 ;
+
+ mmc->caps |= MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
+ MMC_CAP_SET_XPC_180;
+
+ mmc->caps |= MMC_CAP_MAX_CURRENT_800;
+ }
+
+
+ if(owl_requeset_share_uart_sd0_pinctr(mmc)){
+ ret = -1;
+ dev_err(&pdev->dev,
+ "proble requeset share uart sd0 pinctr fail\n");
+ goto out_free_dma;
+ }
+
+ if (mmc_card_expected_mem(host->type_expected)) {
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "can not get sdc transfer end irq resource\n");
+ ret = -ENODEV;
+ goto out_free_dma;
+ }
+ host->sdc_irq = res->start;
+ ret = request_irq(host->sdc_irq,
+ (irq_handler_t)acts_sdc_irq_handler, 0,
+ "sdcard", host);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "request SDC transfer end interrupt failed\n");
+ goto out_free_dma;
+ }
+
+ host->card_detect_mode = of_get_card_detect_mode(dn);
+
+ if (host->card_detect_mode == SIRQ_DETECT_CARD) {
+ detect_use_sirq = 1;
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+
+ /* SIRQ */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no card detect irq resource\n");
+ ret = -ENODEV;
+ goto out_free_sdc_irq;
+ }
+
+ host->detect = host->detect_sirq = res->start;
+ acts_mmc_detect_irq_enable(host);
+
+ ret = request_irq(host->detect,
+ (irq_handler_t)acts_mmc_detect_irq_handler,
+ 0, "card-detect", host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to request card detect SIRQ%d\n",
+ host->detect);
+ goto out_free_sdc_irq;
+ }
+
+ host->detect_irq_registered = 1;
+ } else if (host->card_detect_mode == GPIO_DETECT_CARD) {
+ pr_info("use GPIO to detect SD/MMC card\n");
+ detect_use_gpio = 1;
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+
+ /* card write protect switch gpio */
+ host->wpswitch_gpio = -ENOENT;
+
+ if (of_find_property(dn, "wp_switch_gpio", NULL)) {
+ host->wpswitch_gpio = of_get_named_gpio(dn,
+ "wp_switch_gpio", 0);
+ }
+
+ /* card detect gpio */
+ host->detect_pin = of_get_named_gpio(dn, "card_detect_gpios", 0);
+ if (gpio_is_valid(host->detect_pin)) {
+ ret = gpio_request(host->detect_pin,
+ "card_detect_gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't claim card detect gpio pin\n");
+ goto out_free_sdc_irq;
+ }
+ gpio_direction_input(host->detect_pin);
+ } else {
+ dev_err(&pdev->dev, "card detect gpio pin invalid\n");
+ goto out_free_sdc_irq;
+ }
+
+ host->present = 0;
+
+ init_timer(&host->timer);
+ host->timer.data = (unsigned long)host;
+ host->timer.function = acts_mmc_gpio_check_status;
+ host->timer.expires = jiffies + HZ;
+ add_timer(&host->timer);
+ } else if (host->card_detect_mode == COMMAND_DETECT_CARD) {
+ #ifdef CONFIG_HAS_EARLYSUSPEND
+ host->mmc_early_suspend = 0;
+ host->mmc_es_handler.suspend = mmc_early_suspend;
+ host->mmc_es_handler.resume = mmc_late_resume;
+ register_early_suspend(&(host->mmc_es_handler));
+ #endif
+ pr_info("use COMMAND to detect SD/MMC card\n");
+ } else {
+ pr_err("please choose card detect method\n");
+ }
+ } else if (mmc_card_expected_wifi(host->type_expected)) {
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no SDIO irq resource\n");
+ ret = -ENODEV;
+ goto out_free_dma;
+ }
+
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ host->sdio_irq = res->start;
+ ret = request_irq(host->sdio_irq,
+ (irq_handler_t)acts_sdc_irq_handler, 0,
+ "sdio", host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request SDIO interrupt failed\n");
+ goto out_free_dma;
+ }
+
+ /* dummy device for power control */
+ acts_wlan_status_check_register(host);
+
+ pdata = acts_wlan_device.dev.platform_data;
+ pdata->parent = pdev;
+ ret = platform_device_register(&acts_wlan_device);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register dummy wifi device\n");
+ goto out_free_sdio_irq;
+ }
+
+ /* (wifi & bt) power control init */
+ acts_wlan_bt_power_init(pdata);
+
+ } else if (mmc_card_expected_emmc(host->type_expected)) {
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "can not get sdc transfer end irq resource\n");
+ ret = -ENODEV;
+ goto out_free_dma;
+ }
+ host->sdc_irq = res->start;
+ ret = request_irq(host->sdc_irq,
+ (irq_handler_t)acts_sdc_irq_handler, 0,
+ "emmc", host);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "request eMMC SDC transfer end interrupt failed\n");
+ goto out_free_dma;
+ }
+ } else {
+ dev_err(&pdev->dev, "SDC%d not supported %d\n",
+ host->id, host->type_expected);
+ ret = -ENXIO;
+ goto out_free_dma;
+ }
+
+ ret = owl_mmc_resan(host, 10);
+ if (ret)
+ goto out_free_sdio_irq;
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+out_free_sdio_irq:
+ /* SDIO WiFi card */
+ if (mmc_card_expected_wifi(host->type_expected))
+ free_irq(host->sdio_irq, host);
+
+ /* memory card */
+ if (mmc_card_expected_mem(host->type_expected)) {
+ if (gpio_is_valid(host->detect_pin)) {
+ del_timer_sync(&host->timer);
+ gpio_free(host->detect_pin);
+ }
+
+ if (host->detect_irq_registered)
+ free_irq(host->detect, host);
+ }
+
+out_free_sdc_irq:
+ if (mmc_card_expected_mem(host->type_expected))
+ free_irq(host->sdc_irq, host);
+
+ if (mmc_card_expected_emmc(host->type_expected))
+ free_irq(host->sdc_irq, host);
+
+out_free_dma:
+ if (host->dma)
+ dma_release_channel(host->dma);
+
+out_put_regulator:
+ if (host->reg) {
+ regulator_disable(host->reg);
+ regulator_put(host->reg);
+ }
+
+out_put_clk:
+ if (host->clk) {
+ /* clk_notifier_unregister(host->clk, &host->nblock); */
+ clk_put(host->clk);
+ }
+err_add_host_workqueue:
+ destroy_workqueue(host->add_host_wq);
+
+err_dma_workqueue:
+ destroy_workqueue(host->dma_wq);
+
+out_free_host:
+ mmc_free_host(mmc);
+
+out:
+ return ret;
+}
+
+static int __exit acts_mmc_remove(struct platform_device *pdev)
+{
+ struct gl520xmmc_host *host = platform_get_drvdata(pdev);
+
+ if (host) {
+ mmc_remove_host(host->mmc);
+
+ if (mmc_card_expected_wifi(host->type_expected)) {
+ acts_wlan_bt_power_release();
+ platform_device_unregister(&acts_wlan_device);
+ free_irq(host->sdio_irq, host);
+ }
+
+ if (mmc_card_expected_mem(host->type_expected)) {
+ if (gpio_is_valid(host->detect_pin)) {
+ del_timer_sync(&host->timer);
+ gpio_free(host->detect_pin);
+ }
+ if (host->detect_irq_registered) {
+ free_irq(host->detect, host);
+ host->detect_irq_registered = 0;
+ }
+ free_irq(host->sdc_irq, host);
+ }
+
+ if (mmc_card_expected_emmc(host->type_expected))
+ free_irq(host->sdc_irq, host);
+
+
+ if (host->dma)
+ dma_release_channel(host->dma);
+
+ /* power is turned off when the host is stopped */
+ act_mmc_opt_regulator(host, REG_DISENABLE);
+
+ if (host->clk) {
+ /* clk_notifier_unregister(host->clk, &host->nblock); */
+ clk_put(host->clk);
+ }
+
+ mmc_free_host(host->mmc);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+
+static void suspend_wait_data_finish(struct gl520xmmc_host *host, int timeout)
+{
+ while ((readl(HOST_CTL(host)) & SD_CTL_TS) && (--timeout))
+ udelay(1);
+
+ if (timeout <= 0)
+ pr_err("SDC%d mmc suspend: wait for card to finish data timed out\n", host->id);
+ else
+ pr_info("SDC%d mmc card finished data, entering suspend\n", host->id);
+}
+#ifdef CONFIG_PM
+static int acts_mmc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gl520xmmc_host *host = platform_get_drvdata(pdev);
+ struct mmc_host *mmc = host->mmc;
+ int ret = 0;
+
+ int bootdev;
+
+ bootdev = owl_get_boot_dev();
+
+ pr_debug("SD%d host controller driver Enter suspend\n",
+ host->id);
+ /* cancel any still-running work */
+ cancel_delayed_work(&host->host_add_work);
+ if (host->add_host_wq)
+ flush_workqueue(host->add_host_wq);
+
+ cancel_delayed_work(&host->dma_work);
+ if (host->dma_wq)
+ flush_workqueue(host->dma_wq);
+
+ /* timeout: 2 s */
+ suspend_wait_data_finish(host, 2000000);
+
+ if (mmc && mmc->card && (mmc->card->type == MMC_TYPE_SDIO)) {
+ config_inner_charger_current(DEV_CHARGER_PRE_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 0);
+ acts_wlan_set_power(acts_get_wlan_plat_data(), 0, 0);
+ config_inner_charger_current(DEV_CHARGER_POST_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 0);
+ }
+
+ if (bootdev != OWL_BOOTDEV_SD0)
+ act_mmc_opt_regulator(host, REG_DISENABLE);
+
+ return ret;
+}
+
+static int acts_mmc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gl520xmmc_host *host = platform_get_drvdata(pdev);
+ struct mmc_host *mmc = host->mmc;
+ int bootdev;
+
+ bootdev = owl_get_boot_dev();
+
+ pr_debug("SD%d host controller Enter resume\n", host->id);
+
+ if (bootdev != OWL_BOOTDEV_SD0)
+ act_mmc_opt_regulator(host, REG_ENABLE);
+
+ if (mmc && mmc->card && (mmc->card->type == MMC_TYPE_SDIO)) {
+ config_inner_charger_current(DEV_CHARGER_PRE_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 1);
+ acts_wlan_set_power(acts_get_wlan_plat_data(), 1, 0);
+ config_inner_charger_current(DEV_CHARGER_POST_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 1);
+ }
+
+ return 0;
+}
+#else
+#define acts_mmc_suspend NULL
+#define acts_mmc_resume NULL
+#endif
+
+static const struct of_device_id acts_mmc_dt_match[] = {
+ {.compatible = "actions,owl-mmc", },
+ {}
+};
+
+static const struct dev_pm_ops acts_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(acts_mmc_suspend, acts_mmc_resume)
+};
+
+static struct platform_driver __refdata acts_mmc_driver = {
+ .probe = acts_mmc_probe,
+ .remove = acts_mmc_remove,
+ .driver = {
+ .name = "gl520x_mmc",
+ .owner = THIS_MODULE,
+ .of_match_table = acts_mmc_dt_match,
+ .pm = &acts_pm_ops,
+ },
+};
+
+static int __init acts_mmc_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&acts_mmc_driver);
+ if (ret)
+ pr_err("SD/MMC controller driver register failed\n");
+
+ return ret;
+}
+
+static void __exit acts_mmc_exit(void)
+{
+ platform_driver_unregister(&acts_mmc_driver);
+
+}
+
+module_init(acts_mmc_init);
+module_exit(acts_mmc_exit);
+
+MODULE_AUTHOR("Actions");
+MODULE_DESCRIPTION("MMC/SD host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/gl520x_mmc.h b/drivers/mmc/host/gl520x_mmc.h
new file mode 100755
index 0000000..f9abf23
--- /dev/null
+++ b/drivers/mmc/host/gl520x_mmc.h
@@ -0,0 +1,408 @@
+#ifndef _GL520X_MMC_H_
+#define _GL520X_MMC_H_
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-direction.h>
+#include <linux/clk.h>
+#include <mach/hdmac-owl.h>
+#include <mach/module-owl.h>
+#include <mach/clkname.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#define PINCTRL_UART_PIN "share_uart2_5"
+#define OWL_UPGRADE 0
+#define OWL_NORMAL_BOOT 1
+
+#define REG_ENABLE 1
+#define REG_DISENABLE 0
+#define MMC_CMD_COMPLETE 1
+//#define TSD_SUPPORT 1
+/*
+ * command response code
+ */
+#define CMD_OK BIT(0)
+#define CMD_RSP_ERR BIT(1)
+#define CMD_RSP_BUSY BIT(2)
+#define CMD_RSP_CRC_ERR BIT(3)
+#define CMD_TS_TIMEOUT BIT(4)
+#define CMD_DATA_TIMEOUT BIT(5)
+#define HW_TIMEOUT BIT(6)
+#define DATA_WR_CRC_ERR BIT(7)
+#define DATA_RD_CRC_ERR BIT(8)
+#define DATA0_BUSY_ERR BIT(9)
+
+#define OWL_MMC_WORK_QUEUE_NAME 32
+#define OWL_RETRY_DELAY_CHAIN_TIME 2
+enum {
+ PURE_CMD,
+ DATA_CMD,
+};
+/*
+ * card type
+ */
+enum {
+ MMC_CARD_DISABLE,
+ MMC_CARD_MEMORY,
+ MMC_CARD_EMMC,
+ MMC_CARD_WIFI,
+};
+#define mmc_card_expected_mem(type) ((type) == MMC_CARD_MEMORY)
+#define mmc_card_expected_emmc(type) ((type) == MMC_CARD_EMMC)
+#define mmc_card_expected_wifi(type) ((type) == MMC_CARD_WIFI)
+
+/*
+ * card detect method
+ */
+enum {
+ SIRQ_DETECT_CARD,
+ GPIO_DETECT_CARD,
+ COMMAND_DETECT_CARD,
+};
+
+#define SDC0_SLOT 0
+#define SDC1_SLOT 1
+#define SDC2_SLOT 2
+
+#define UART_PIN 0
+#define SD_PIN 1
+#define ERR_PIN -1
+
+#define ACTS_MMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
+ MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
+ MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
+
+struct mmc_con_delay {
+ unsigned char delay_lowclk;
+ unsigned char delay_midclk;
+ unsigned char delay_highclk;
+};
+
+struct gl520xmmc_host {
+ spinlock_t lock;
+ struct mutex pin_mutex;
+ u32 id; /* SD Controller number */
+ u32 module_id; /* global module ID */
+ void __iomem *iobase;
+ u32 start;
+ u32 type_expected; /* MEMORY Card or SDIO Card */
+
+ int card_detect_mode; /* which method used to detect card */
+
+ u32 detect; /* irq line for mmc/sd card detect */
+ u32 detect_sirq; /* Which SIRQx used to detect card */
+ int detect_irq_registered; /* card detect irq is registered */
+
+ u32 sdc_irq; /* irq line for SDC transfer end */
+ struct completion sdc_complete;
+
+ u32 sdio_irq; /* irq for SDIO wifi data transfer */
+ u32 eject; /* card status */
+
+ int power_state; /* card status */
+ int bus_width; /* data bus width */
+ int chip_select;
+ int timing;
+ u32 clock; /* current clock frequency */
+ u32 clk_on; /* card module clock status */
+ struct clk *clk; /* SDC clock source */
+ struct notifier_block nblock; /* clkfreq notifier block */
+ struct regulator *reg; /* supply regulator */
+
+ struct timer_list timer; /* used for gpio card detect */
+ u32 detect_pin; /* gpio card detect pin number */
+ int wpswitch_gpio; /* card write protect gpio */
+ int present; /* card is inserted or extracted ? */
+ int sdio_present; /* Wi-Fi is open or not ? */
+ char dma_terminate;
+ char switch_pin_flag; /* UART_PIN: uart mode, host0 cmd/sd0 clk valid
+ * SD_PIN: cmd, clk, sd0-sd3
+ * ERR_PIN: init status
+ */
+ struct pinctrl *pcl;
+ bool dmaflag;
+ unsigned char write_delay_chain;
+ unsigned char read_delay_chain;
+ unsigned char write_delay_chain_bak;
+ unsigned char read_delay_chain_bak;
+ unsigned char adjust_write_delay_chain;
+ unsigned char adjust_read_delay_chain;
+ int sdio_uart_supported;
+ int card_detect_reverse;
+ int send_continuous_clock; /* WiFi need to send continuous clock */
+
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+
+ enum dma_data_direction dma_dir;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config dma_conf;
+ struct owl_dma_slave dma_slave;
+
+ struct completion dma_complete;
+ struct workqueue_struct *dma_wq;
+ struct workqueue_struct *add_host_wq;
+ struct delayed_work dma_work;
+ struct delayed_work host_add_work;
+
+ struct mmc_con_delay wdelay;
+ struct mmc_con_delay rdelay;
+ unsigned char pad_drv;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend mmc_es_handler; /* for early suspend */
+ unsigned int mmc_early_suspend;
+#endif
+};
+
+/*
+ * PAD Drive Capacity config
+ */
+#define PAD_DRV_LOW (0)
+#define PAD_DRV_MID (1)
+#define PAD_DRV_HIGH (3)
+
+#define SDC0_WDELAY_LOW_CLK (0xf)
+#define SDC0_WDELAY_MID_CLK (0xa)
+#define SDC0_WDELAY_HIGH_CLK (0x9)
+
+#define SDC0_RDELAY_LOW_CLK (0xf)
+#define SDC0_RDELAY_MID_CLK (0xa)
+#define SDC0_RDELAY_HIGH_CLK (0x8)
+#define SDC0_RDELAY_DDR50 (0x9)
+#define SDC0_WDELAY_DDR50 (0x8)
+#define DDR50_CLK (40000000)
+
+#define SDC0_PAD_DRV PAD_DRV_MID
+
+#define SDC1_WDELAY_LOW_CLK (0xf)
+#define SDC1_WDELAY_MID_CLK (0xa)
+#define SDC1_WDELAY_HIGH_CLK (0x8)
+
+#define SDC1_RDELAY_LOW_CLK (0xf)
+#define SDC1_RDELAY_MID_CLK (0xa)
+#define SDC1_RDELAY_HIGH_CLK (0x8)
+
+#define SDC1_PAD_DRV PAD_DRV_MID
+
+#define SDC2_WDELAY_LOW_CLK (0xf)
+#define SDC2_WDELAY_MID_CLK (0xa)
+#define SDC2_WDELAY_HIGH_CLK (0x8)
+
+#define SDC2_RDELAY_LOW_CLK (0xf)
+#define SDC2_RDELAY_MID_CLK (0xa)
+#define SDC2_RDELAY_HIGH_CLK (0x8)
+
+#define SDC2_PAD_DRV PAD_DRV_MID
+
+
+/*
+ * SDC registers
+ */
+#define SD_EN_OFFSET 0x0000
+#define SD_CTL_OFFSET 0x0004
+#define SD_STATE_OFFSET 0x0008
+#define SD_CMD_OFFSET 0x000c
+#define SD_ARG_OFFSET 0x0010
+#define SD_RSPBUF0_OFFSET 0x0014
+#define SD_RSPBUF1_OFFSET 0x0018
+#define SD_RSPBUF2_OFFSET 0x001c
+#define SD_RSPBUF3_OFFSET 0x0020
+#define SD_RSPBUF4_OFFSET 0x0024
+#define SD_DAT_OFFSET 0x0028
+#define SD_BLK_SIZE_OFFSET 0x002c
+#define SD_BLK_NUM_OFFSET 0x0030
+#define SD_BUF_SIZE_OFFSET 0x0034
+
+#define HOST_EN(h) ((h)->iobase + SD_EN_OFFSET)
+#define HOST_CTL(h) ((h)->iobase + SD_CTL_OFFSET)
+#define HOST_STATE(h) ((h)->iobase + SD_STATE_OFFSET)
+#define HOST_CMD(h) ((h)->iobase + SD_CMD_OFFSET)
+#define HOST_ARG(h) ((h)->iobase + SD_ARG_OFFSET)
+#define HOST_RSPBUF0(h) ((h)->iobase + SD_RSPBUF0_OFFSET)
+#define HOST_RSPBUF1(h) ((h)->iobase + SD_RSPBUF1_OFFSET)
+#define HOST_RSPBUF2(h) ((h)->iobase + SD_RSPBUF2_OFFSET)
+#define HOST_RSPBUF3(h) ((h)->iobase + SD_RSPBUF3_OFFSET)
+#define HOST_RSPBUF4(h) ((h)->iobase + SD_RSPBUF4_OFFSET)
+#define HOST_DAT(h) ((h)->iobase + SD_DAT_OFFSET)
+#define HOST_DAT_DMA(h) ((h)->start + SD_DAT_OFFSET)
+#define HOST_BLK_SIZE(h) ((h)->iobase + SD_BLK_SIZE_OFFSET)
+#define HOST_BLK_NUM(h) ((h)->iobase + SD_BLK_NUM_OFFSET)
+#define HOST_BUF_SIZE(h) ((h)->iobase + SD_BUF_SIZE_OFFSET)
+
+/*
+ * Register Bit defines
+ */
+
+/*
+ * Register SD_EN
+ */
+#define SD_EN_RANE (1 << 31)
+/* bit 30 reserved */
+#define SD_EN_RAN_SEED(x) (((x) & 0x3f) << 24)
+/* bit 23~13 reserved */
+#define SD_EN_S18EN (1 << 12)
+/* bit 11 reserved */
+#define SD_EN_RESE (1 << 10)
+#define SD_EN_DAT1_S (1 << 9)
+#define SD_EN_CLK_S (1 << 8)
+#define SD_ENABLE (1 << 7)
+#define SD_EN_BSEL (1 << 6)
+/* bit 5~4 reserved */
+#define SD_EN_SDIOEN (1 << 3)
+#define SD_EN_DDREN (1 << 2)
+#define SD_EN_DATAWID(x) (((x) & 0x3) << 0)
+
+/*
+ * Register SD_CTL
+ */
+#define SD_CTL_TOUTEN (1 << 31)
+#define SD_CTL_TOUTCNT(x) (((x) & 0x7f) << 24)
+#define SD_CTL_RDELAY(x) (((x) & 0xf) << 20)
+#define SD_CTL_WDELAY(x) (((x) & 0xf) << 16)
+/* bit 15~14 reserved */
+#define SD_CTL_CMDLEN (1 << 13)
+#define SD_CTL_SCC (1 << 12)
+#define SD_CTL_TCN(x) (((x) & 0xf) << 8)
+#define SD_CTL_TS (1 << 7)
+#define SD_CTL_LBE (1 << 6)
+#define SD_CTL_C7EN (1 << 5)
+/* bit 4 reserved */
+#define SD_CTL_TM(x) (((x) & 0xf) << 0)
+
+/*
+ * Register SD_STATE
+ */
+/* bit 31~19 reserved */
+#define SD_STATE_DAT1BS (1 << 18)
+#define SD_STATE_SDIOB_P (1 << 17)
+#define SD_STATE_SDIOB_EN (1 << 16)
+#define SD_STATE_TOUTE (1 << 15)
+#define SD_STATE_BAEP (1 << 14)
+/* bit 13 reserved */
+#define SD_STATE_MEMRDY (1 << 12)
+#define SD_STATE_CMDS (1 << 11)
+#define SD_STATE_DAT1AS (1 << 10)
+#define SD_STATE_SDIOA_P (1 << 9)
+#define SD_STATE_SDIOA_EN (1 << 8)
+#define SD_STATE_DAT0S (1 << 7)
+#define SD_STATE_TEIE (1 << 6)
+#define SD_STATE_TEI (1 << 5)
+#define SD_STATE_CLNR (1 << 4)
+#define SD_STATE_CLC (1 << 3)
+#define SD_STATE_WC16ER (1 << 2)
+#define SD_STATE_RC16ER (1 << 1)
+#define SD_STATE_CRC7ER (1 << 0)
+
+
+/*
+ * DMA mode config
+ */
+#define ATV520X_SDC0WT_DMAMODE (0x00010202) /* DDR->FIFO */
+#define ATV520X_SDC1WT_DMAMODE (0x00010203)
+#define ATV520X_SDC2WT_DMAMODE (0x00010204)
+
+#define ATV520X_SDC0RD_DMAMODE (0x00040802) /* FIFO->DDR */
+#define ATV520X_SDC1RD_DMAMODE (0x00040803)
+#define ATV520X_SDC2RD_DMAMODE (0x00040804)
+
+/*
+ * PAD drive capacity config
+ */
+#define SD1_DRV_HIGH_MASK (~(0x3 << 20)) /* sd1 data */
+#define SD1_DRV_HIGH_LOW (0x0 << 20)
+#define SD1_DRV_HIGH_MID (0x1 << 20)
+#define SD1_DRV_HIGH_HIGH (0x2 << 20)
+
+#define SD1_DRV_HIGH_MASK2 (~(0xF << 12)) /* sd1 cmd, clk */
+#define SD1_DRV_HIGH2_LOW (0x0 << 12)
+#define SD1_DRV_HIGH2_MID (0x5 << 12)
+#define SD1_DRV_HIGH2_HIGH (0xa << 12)
+
+#define SD0_DRV_HIGH_MASK (~(0x3 << 22)) /* sd0 data */
+#define SD0_DRV_HIGH_LOW (0x0 << 22)
+#define SD0_DRV_HIGH_MID (0x1 << 22)
+#define SD0_DRV_HIGH_HIGH (0x3 << 22)
+
+#define SD0_DRV_HIGH_MASK2 (~(0xF << 16)) /* sd0 cmd, clk */
+#define SD0_DRV_HIGH2_LOW (0x0 << 16)
+#define SD0_DRV_HIGH2_MID (0x5 << 16)
+#define SD0_DRV_HIGH2_HIGH (0xf << 16)
+
+/* dump_iomap addresses */
+/*GPIO_MFP_PWM_BASE*/
+#define DUMP_MFP_CTL0(mapbase) (mapbase+MFP_CTL0-GPIO_MFP_PWM_BASE)
+#define DUMP_MFP_CTL1(mapbase) (mapbase+MFP_CTL1-GPIO_MFP_PWM_BASE)
+#define DUMP_MFP_CTL2(mapbase) (mapbase+MFP_CTL2-GPIO_MFP_PWM_BASE)
+#define DUMP_MFP_CTL3(mapbase) (mapbase+MFP_CTL3-GPIO_MFP_PWM_BASE)
+#define DUMP_PAD_DVR0(mapbase) (mapbase+PAD_DRV0-GPIO_MFP_PWM_BASE)
+#define DUMP_PAD_DVR1(mapbase) (mapbase+PAD_DRV1-GPIO_MFP_PWM_BASE)
+#define DUMP_PAD_DVR2(mapbase) (mapbase+PAD_DRV2-GPIO_MFP_PWM_BASE)
+#define DUMP_PAD_PULLCTL0(mapbase) (mapbase+PAD_PULLCTL0-GPIO_MFP_PWM_BASE)
+#define DUMP_PAD_PULLCTL1(mapbase) (mapbase+PAD_PULLCTL1-GPIO_MFP_PWM_BASE)
+#define DUMP_PAD_PULLCTL2(mapbase) (mapbase+PAD_PULLCTL2-GPIO_MFP_PWM_BASE)
+#define DUMP_GPIO_CINEN(mapbase) (mapbase+GPIO_CINEN-GPIO_MFP_PWM_BASE)
+#define DUMP_GPIO_COUTEN(mapbase) (mapbase+GPIO_COUTEN-GPIO_MFP_PWM_BASE)
+/*CMU_BASE*/
+#define DUMP_CMU_DEVCLKEN0(mapbase) (mapbase+CMU_DEVCLKEN0-CMU_BASE)
+#define DUMP_CMU_DEVCLKEN1(mapbase) (mapbase+CMU_DEVCLKEN1-CMU_BASE)
+#define DUMP_CMU_DEVPLL(mapbase) (mapbase+CMU_DEVPLL-CMU_BASE)
+#define DUMP_CMU_NANDPLL(mapbase) (mapbase+CMU_NANDPLL-CMU_BASE)
+#define DUMP_CMU_CMU_SD0CLK(mapbase) (mapbase+CMU_SD0CLK-CMU_BASE)
+#define DUMP_CMU_CMU_SD1CLK(mapbase) (mapbase+CMU_SD1CLK-CMU_BASE)
+#define DUMP_CMU_CMU_SD2CLK(mapbase) (mapbase+CMU_SD2CLK-CMU_BASE)
+
+
+static inline int module_clk_get(struct gl520xmmc_host *host)
+{
+ switch (host->module_id) {
+ case MOD_ID_SD0:
+ host->clk = clk_get(NULL, CLKNAME_SD0_CLK);
+ break;
+ case MOD_ID_SD1:
+ host->clk = clk_get(NULL, CLKNAME_SD1_CLK);
+ break;
+ case MOD_ID_SD2:
+ host->clk = clk_get(NULL, CLKNAME_SD2_CLK);
+ break;
+ default:
+ pr_err("error: CLK, module not supported\n");
+ return -ENODEV;
+ }
+
+ if (IS_ERR(host->clk)) {
+ pr_err("error: SDC[%u], cannot get host clock\n", host->id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static inline int module_clk_set_rate(struct gl520xmmc_host *host,
+ unsigned long freq)
+{
+ long rate;
+ int ret;
+
+ rate = clk_round_rate(host->clk, freq);
+ if (rate < 0) {
+ pr_err("SDC%d cannot get a suitable rate: %ld\n", host->id, rate);
+ return -ENXIO;
+ }
+
+ ret = clk_set_rate(host->clk, rate);
+ if (ret < 0) {
+ pr_err("SDC%d cannot set rate %ld: %d\n", host->id, rate, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* symbols exported from asoc_serial.c */
+extern int sdio_uart_pinctrl_request(void);
+extern void sdio_uart_pinctrl_free(void);
+
+#endif /* end of _GL520X_MMC_H_ */
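
As orientation for the register helpers above, a minimal sketch (not part of
the patch) of how the SD_CTL field macros and the per-host delay-chain values
combine into one control-register write. example_start_transfer, the
transfer-mode value and the timeout count are illustrative assumptions; the
real register sequencing lives in gl520x_mmc.c.

#include <linux/io.h>
#include "gl520x_mmc.h"

static void example_start_transfer(struct gl520xmmc_host *host)
{
	u32 ctl;

	ctl = SD_CTL_RDELAY(host->read_delay_chain) |
	      SD_CTL_WDELAY(host->write_delay_chain) |
	      SD_CTL_TOUTEN | SD_CTL_TOUTCNT(64) |	/* hardware timeout, example count */
	      SD_CTL_TM(4) |				/* transfer mode, example value */
	      SD_CTL_TS;				/* start the transfer */

	writel(ctl, HOST_CTL(host));
}
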
diff --git a/drivers/mmc/host/gl520x_wifi_test.c b/drivers/mmc/host/gl520x_wifi_test.c
new file mode 100755
index 0000000..36dbb1a
--- /dev/null
+++ b/drivers/mmc/host/gl520x_wifi_test.c
@@ -0,0 +1,57 @@
+/*
+ * gl520x_wifi_test.c - SDIO Wi-Fi test module
+ *
+ * Copyright (C) 2012, Actions Semiconductor Co. LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/cpufreq.h>
+#include <linux/genhd.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/clk.h>
+
+#include "gl520x_wifi_test.h"
+
+static int __init acts_mmc_init(void)
+{
+ int ret;
+
+ ret = acts_wifi_init();
+ if (unlikely(ret < 0)) {
+ pr_err("SDIO: Failed to register the power control driver.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void __exit acts_mmc_exit(void)
+{
+ acts_wifi_cleanup();
+}
+
+module_init(acts_mmc_init);
+module_exit(acts_mmc_exit);
+
+MODULE_AUTHOR("Actions");
+MODULE_DESCRIPTION("MMC/SD host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/gl520x_wifi_test.h b/drivers/mmc/host/gl520x_wifi_test.h
new file mode 100755
index 0000000..c66acd5
--- /dev/null
+++ b/drivers/mmc/host/gl520x_wifi_test.h
@@ -0,0 +1,7 @@
+#ifndef _GL520X_WIFI_TEST_H_
+#define _GL520X_WIFI_TEST_H_
+
+extern int acts_wifi_init(void);
+extern void acts_wifi_cleanup(void);
+
+#endif /* end of _GL520X_WIFI_TEST_H_ */
diff --git a/drivers/mmc/host/wlan_device.c b/drivers/mmc/host/wlan_device.c
new file mode 100755
index 0000000..43189a0
--- /dev/null
+++ b/drivers/mmc/host/wlan_device.c
@@ -0,0 +1,215 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mmc/host.h>
+
+#include "wlan_plat_data.h"
+#include "gl520x_mmc.h"
+
+static void (*wifi_detect_func)(int card_present, void *dev_id);
+static void *wifi_detect_param;
+
+
+static void wlan_status_check(int card_present, void *dev_id)
+{
+ struct gl520xmmc_host *host = dev_id;
+
+ pr_info("SDIO Wi-Fi check status, present (%d -> %d)\n",
+ host->sdio_present, card_present);
+
+ if (card_present == 0) {
+ pr_info("MMC: emulate power off the SDIO card\n");
+ module_reset(host->module_id);
+ }
+
+ host->sdio_present = card_present;
+ mmc_detect_change(host->mmc, 0);
+}
+
+static int wlan_status_check_register(
+ void (*callback)(int card_present, void *dev_id),
+ void *dev_id)
+{
+ if (wifi_detect_func) {
+ pr_err("wifi status check function has registered\n");
+ return -EAGAIN;
+ }
+ wifi_detect_func = callback;
+ wifi_detect_param = dev_id;
+ return 0;
+}
+
+static struct wlan_plat_data *g_pdata;
+
+struct wlan_plat_data *acts_get_wlan_plat_data(void)
+{
+ return g_pdata;
+}
+EXPORT_SYMBOL(acts_get_wlan_plat_data);
+
+void acts_wlan_bt_power(int on)
+{
+ struct wlan_plat_data *pdata = g_pdata;
+
+ if (gpio_is_valid(pdata->wifi_bt_power_gpios)) {
+ if (on) {
+ if (++pdata->wl_bt_ref_count == 1) {
+ gpio_set_value(pdata->wifi_bt_power_gpios, 1);
+ mdelay(20);
+ }
+ } else {
+ if (--pdata->wl_bt_ref_count == 0)
+ gpio_set_value(pdata->wifi_bt_power_gpios, 0);
+ }
+
+ pr_info("Wlan or BT power %s, ref count:%d\n",
+ (on ? "on" : "off"), pdata->wl_bt_ref_count);
+ }
+}
+EXPORT_SYMBOL(acts_wlan_bt_power);
+
+static int wlan_init(struct wlan_plat_data *pdata)
+{
+ struct device_node *np = NULL;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL, "wifi,bt,power,ctl");
+ if (NULL == np) {
+ pr_err("No \"wifi,bt,power,ctl\" node found in dts\n");
+ goto fail;
+ }
+
+ /* wifi en */
+ if (of_find_property(np, "wifi_en_gpios", NULL)) {
+ pdata->wifi_en_gpios = of_get_named_gpio(np,
+ "wifi_en_gpios", 0);
+ if (gpio_is_valid(pdata->wifi_en_gpios)) {
+ ret = gpio_request(pdata->wifi_en_gpios,
+ "wifi_en_gpios");
+ if (ret < 0) {
+ pr_err("couldn't claim wifi_en_gpios pin\n");
+ goto fail;
+ }
+ gpio_direction_output(pdata->wifi_en_gpios, 0);
+ } else {
+ pr_err("gpio for sdio wifi_en_gpios invalid.\n");
+ }
+ }
+
+ return 0;
+
+fail:
+ return -ENXIO;
+}
+
+static int wlan_set_power(struct wlan_plat_data *pdata, int on)
+{
+
+ acts_wlan_bt_power(on);
+
+ if (gpio_is_valid(pdata->wifi_en_gpios)) {
+ if (on) {
+ gpio_set_value(pdata->wifi_en_gpios, 1);
+ mdelay(20);
+ } else {
+ gpio_set_value(pdata->wifi_en_gpios, 0);
+ }
+ }
+
+ mdelay(20);
+
+ return 0;
+}
+
+/*
+ * Open or close the Wi-Fi card, according to @open
+ */
+static int wlan_card_detect(int open)
+{
+ if (wifi_detect_func)
+ wifi_detect_func(open, wifi_detect_param);
+ else
+ pr_warn("SDIO Wi-Fi card detect error\n");
+ return 0;
+}
+
+static void wlan_exit(struct wlan_plat_data *pdata)
+{
+ if (gpio_is_valid(pdata->wifi_en_gpios))
+ gpio_free(pdata->wifi_en_gpios);
+}
+
+static struct wlan_plat_data wlan_control = {
+ .set_init = wlan_init,
+ .set_exit = wlan_exit,
+ .set_power = wlan_set_power,
+ .set_carddetect = wlan_card_detect,
+};
+
+static void wlan_platform_release(struct device *dev)
+{
+}
+
+struct platform_device acts_wlan_device = {
+ .name = "gl520x_wlan",
+ .id = 0,
+ .dev = {
+ .release = wlan_platform_release,
+ .platform_data = &wlan_control,
+ },
+};
+
+int acts_wlan_bt_power_init(struct wlan_plat_data *pdata)
+{
+ struct device_node *np = NULL;
+ int ret;
+
+ g_pdata = pdata;
+
+ np = of_find_compatible_node(NULL, NULL, "wifi,bt,power,ctl");
+ if (NULL == np) {
+ pr_err("No \"wifi,bt,power,ctl\" node found in dts\n");
+ goto fail;
+ }
+
+ if (of_find_property(np, "wifi_bt_power_gpios", NULL)) {
+ pdata->wifi_bt_power_gpios = of_get_named_gpio(np,
+ "wifi_bt_power_gpios", 0);
+ if (gpio_is_valid(pdata->wifi_bt_power_gpios)) {
+ ret = gpio_request(pdata->wifi_bt_power_gpios,
+ "wifi_bt_power_gpios");
+ if (ret < 0) {
+ pr_err("couldn't claim wifi power gpio pin\n");
+ goto fail;
+ }
+ gpio_direction_output(pdata->wifi_bt_power_gpios, 0);
+ pdata->wl_bt_ref_count = 0;
+ } else {
+ pr_err("gpio for sdio wifi power supply invalid.\n");
+ }
+ }
+ return 0;
+
+fail:
+ return -ENXIO;
+}
+
+void acts_wlan_bt_power_release(void)
+{
+ struct wlan_plat_data *pdata = g_pdata;
+
+ if (gpio_is_valid(pdata->wifi_bt_power_gpios))
+ gpio_free(pdata->wifi_bt_power_gpios);
+}
+
+void acts_wlan_status_check_register(struct gl520xmmc_host *host)
+{
+ wlan_status_check_register(wlan_status_check, host);
+}
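
Because acts_wlan_bt_power() above reference-counts the shared WiFi/BT power
GPIO, any other user of that rail (for example a Bluetooth driver) only needs
balanced on/off calls. A minimal sketch with hypothetical callers:

/* exported from wlan_device.c */
extern void acts_wlan_bt_power(int on);

static int example_bt_open(void)
{
	acts_wlan_bt_power(1);	/* ref count 0 -> 1 powers the rail up */
	return 0;
}

static void example_bt_close(void)
{
	acts_wlan_bt_power(0);	/* the rail stays up until the last user drops it */
}
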
diff --git a/drivers/mmc/host/wlan_device.h b/drivers/mmc/host/wlan_device.h
new file mode 100755
index 0000000..4eb9ca7
--- /dev/null
+++ b/drivers/mmc/host/wlan_device.h
@@ -0,0 +1,10 @@
+#ifndef _GL520X_WLAN_DEVICE_H_
+#define _GL520X_WLAN_DEVICE_H_
+
+/* sdio wifi detect */
+extern void acts_wlan_status_check_register(struct gl520xmmc_host *host);
+extern int acts_wlan_bt_power_init(struct wlan_plat_data *pdata);
+extern void acts_wlan_bt_power_release(void);
+extern struct platform_device acts_wlan_device;
+
+#endif /* end of _GL520X_WLAN_DEVICE_H_ */
diff --git a/drivers/mmc/host/wlan_driver.c b/drivers/mmc/host/wlan_driver.c
new file mode 100755
index 0000000..d860718
--- /dev/null
+++ b/drivers/mmc/host/wlan_driver.c
@@ -0,0 +1,185 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/atc260x/atc260x.h>
+
+#include "wlan_plat_data.h"
+
+int g_wifi_type = 0;
+static void (*wifi_hook)(void) = NULL;
+static struct completion wlan_complete;
+
+/* Wi-Fi platform driver */
+static int acts_wlan_init(struct wlan_plat_data *pdata)
+{
+ int ret;
+
+ if (pdata && pdata->set_init) {
+ ret = pdata->set_init(pdata);
+ if (ret < 0)
+ pr_err("sdio wifi: init sdio wifi failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int acts_wlan_set_power(struct wlan_plat_data *pdata, int on,
+ unsigned long msec)
+{
+ if (pdata && pdata->set_power)
+ pdata->set_power(pdata, on);
+
+ if (msec)
+ mdelay(msec);
+ return 0;
+}
+EXPORT_SYMBOL(acts_wlan_set_power);
+
+static int acts_wlan_carddetect(int on, struct wlan_plat_data *pdata)
+{
+ if (pdata && pdata->set_carddetect)
+ pdata->set_carddetect(on);
+
+ return 0;
+}
+
+static void acts_wlan_exit(struct wlan_plat_data *pdata)
+{
+ if (pdata && pdata->set_exit)
+ pdata->set_exit(pdata);
+}
+
+static int wlan_probe(struct platform_device *pdev)
+{
+ struct wlan_plat_data *pdata =
+ (struct wlan_plat_data *)(pdev->dev.platform_data);
+
+ if (acts_wlan_init(pdata)) {
+ pr_err("sdio wifi device probe failed\n");
+ return -ENXIO;
+ }
+
+ config_inner_charger_current(DEV_CHARGER_PRE_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 1);
+ acts_wlan_set_power(pdata, 1, 0);
+ config_inner_charger_current(DEV_CHARGER_POST_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 1);
+
+ if (g_wifi_type != WIFI_TYPE_BCMDHD) {
+ pr_info("wlan card detect: announce SDIO card insertion\n");
+ acts_wlan_carddetect(1, pdata);
+ }
+
+ complete(&wlan_complete);
+ return 0;
+}
+
+static int wlan_remove(struct platform_device *pdev)
+{
+ struct wlan_plat_data *pdata =
+ (struct wlan_plat_data *)(pdev->dev.platform_data);
+
+
+ config_inner_charger_current(DEV_CHARGER_PRE_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 0);
+ acts_wlan_set_power(pdata, 0, 0);
+ config_inner_charger_current(DEV_CHARGER_POST_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 0);
+
+ if (g_wifi_type != WIFI_TYPE_BCMDHD) {
+ pr_info("wlan card detect: announce SDIO card removal\n");
+ acts_wlan_carddetect(0, pdata);
+ }
+
+ acts_wlan_exit(pdata);
+ complete(&wlan_complete);
+ return 0;
+}
+
+
+static void wlan_shutdown(struct platform_device *pdev)
+{
+ struct wlan_plat_data *pdata =
+ (struct wlan_plat_data *)(pdev->dev.platform_data);
+
+ pr_info("%s\n", __func__);
+
+ if (wifi_hook != NULL)
+ wifi_hook();
+
+ config_inner_charger_current(DEV_CHARGER_PRE_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 0);
+ acts_wlan_set_power(pdata, 0, 0);
+ config_inner_charger_current(DEV_CHARGER_POST_CONFIG,
+ DEV_CHARGER_CURRENT_WIFI, 0);
+
+ if (g_wifi_type != WIFI_TYPE_BCMDHD) {
+ pr_info("wlan card detect: announce SDIO card removal\n");
+ acts_wlan_carddetect(0, pdata);
+ }
+
+ complete(&wlan_complete);
+}
+
+static int wlan_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ pr_info("##> %s\n", __func__);
+ return 0;
+}
+static int wlan_resume(struct platform_device *pdev)
+{
+ pr_info("##> %s\n", __func__);
+ return 0;
+}
+
+static struct platform_driver wlan_driver = {
+ .probe = wlan_probe,
+ .remove = wlan_remove,
+ .shutdown = wlan_shutdown,
+ .suspend = wlan_suspend,
+ .resume = wlan_resume,
+ .driver = {
+ .name = "gl520x_wlan",
+ }
+};
+
+/* symbols exported to the wifi driver
+ *
+ * Parameters:
+ * type : wifi type, defined in "wlan_plat_data.h"; do not pass '0'
+ * p : optional callback into the wifi driver, invoked from this module
+ * at shutdown; pass NULL if not needed
+ */
+int acts_wifi_init(int type, void *p)
+{
+ int ret;
+
+ g_wifi_type = type;
+ wifi_hook = p;
+
+ init_completion(&wlan_complete);
+ ret = platform_driver_register(&wlan_driver);
+ if (!wait_for_completion_timeout(&wlan_complete,
+ msecs_to_jiffies(3300))) {
+ pr_err("%s: wifi driver register failed\n", __func__);
+ goto fail;
+ }
+
+ return ret;
+fail:
+ platform_driver_unregister(&wlan_driver);
+ return -1;
+}
+EXPORT_SYMBOL(acts_wifi_init);
+
+void acts_wifi_cleanup(void)
+{
+ platform_driver_unregister(&wlan_driver);
+
+ g_wifi_type = 0;
+ wifi_hook = NULL;
+}
+EXPORT_SYMBOL(acts_wifi_cleanup);
+
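
Putting the two exported symbols together: a minimal sketch (not part of the
patch) of how an SDIO Wi-Fi driver module could hook into this layer.
example_wifi_module_init/exit are hypothetical, and the choice of
WIFI_TYPE_RTK (from wlan_plat_data.h) and the NULL shutdown hook are
illustrative assumptions only.

#include <linux/module.h>
#include "wlan_plat_data.h"	/* WIFI_TYPE_* */

/* exported from wlan_driver.c */
extern int acts_wifi_init(int type, void *p);
extern void acts_wifi_cleanup(void);

static int __init example_wifi_module_init(void)
{
	/* powers the card, announces insertion and waits for the probe */
	return acts_wifi_init(WIFI_TYPE_RTK, NULL);
}

static void __exit example_wifi_module_exit(void)
{
	/* unregisters the dummy platform driver, powering the card down */
	acts_wifi_cleanup();
}

module_init(example_wifi_module_init);
module_exit(example_wifi_module_exit);
MODULE_LICENSE("GPL");
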
diff --git a/drivers/mmc/host/wlan_plat_data.h b/drivers/mmc/host/wlan_plat_data.h
new file mode 100755
index 0000000..e2facf5
--- /dev/null
+++ b/drivers/mmc/host/wlan_plat_data.h
@@ -0,0 +1,23 @@
+#ifndef _GL520X_WIFI_PLAT_H_
+#define _GL520X_WIFI_PLAT_H_
+/*
+ * Wi-Fi platform device
+ */
+
+#define WIFI_TYPE_BCMDHD 0x01
+#define WIFI_TYPE_RTK 0x02
+
+struct wlan_plat_data {
+ int (*set_power)(struct wlan_plat_data *pdata, int on);
+ int (*set_carddetect)(int open);
+ int (*set_init)(struct wlan_plat_data *pdata);
+ void (*set_exit)(struct wlan_plat_data *pdata);
+
+ struct platform_device *parent;
+ int wl_bt_ref_count; /* power reference count */
+
+ u32 wifi_bt_power_gpios; /* gpio for (WiFi & BT) power control */
+ u32 wifi_en_gpios; /* wifi ENABLE,PD(power down),DISABLE,RST */
+};
+
+#endif /* end of _GL520X_WIFI_PLAT_H_ */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
old mode 100644
new mode 100755
index 258daf9..997e1bd
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -212,5 +212,6 @@ static inline void mmc_claim_host(struct mmc_host *host)
struct device_node;
extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
+extern int sd_mmc_reinit(struct mmc_host *host);
#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
old mode 100644
new mode 100755
index 4fcbb8c..f37abc7
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -144,6 +144,8 @@ struct mmc_host_ops {
*/
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+ int (*switch_sd_pinctr)(struct mmc_host *host);
+ int (*switch_uart_pinctr)(struct mmc_host *host);
};
struct mmc_card;
@@ -260,9 +262,21 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */
+
+#define MMC_CAP_SET_XPC_330 (1 << 20) /* Host supports >150mA current at 3.3V*/
+#define MMC_CAP_SET_XPC_300 (1 << 21) /* Host supports >150mA current at 3.0V */
+#define MMC_CAP_SET_XPC_180 (1 << 22) /* Host supports >150mA current at 1.8V */
+
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+
+
+#define MMC_CAP_MAX_CURRENT_200 (1 << 26) /* Host max current limit is 200mA */
+#define MMC_CAP_MAX_CURRENT_400 (1 << 27) /* Host max current limit is 400mA */
+#define MMC_CAP_MAX_CURRENT_600 (1 << 28) /* Host max current limit is 600mA */
+#define MMC_CAP_MAX_CURRENT_800 (1 << 29) /* Host max current limit is 800mA */
+
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
--
2.7.4