Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (26 commits)
mmc: SDHI should depend on SUPERH || ARCH_SHMOBILE
mmc: tmio_mmc: Move some defines into a shared header
mmc: tmio: support aggressive clock gating
mmc: tmio: fix power-mode interpretation
mmc: tmio: remove work-around for unmasked SDIO interrupts
sh: fix SDHI IO address-range
ARM: mach-shmobile: fix SDHI IO address-range
mmc: tmio: only access registers above 0xff, if available
mfd: remove now redundant sh_mobile_sdhi.h header
sh: convert boards to use linux/mmc/sh_mobile_sdhi.h
ARM: mach-shmobile: convert boards to use linux/mmc/sh_mobile_sdhi.h
mmc: tmio: convert the SDHI MMC driver from MFD to a platform driver
sh: ecovec: use the CONFIG_MMC_TMIO symbols instead of MFD
mmc: tmio: split core functionality, DMA and MFD glue
mmc: tmio: use PIO for short transfers
mmc: tmio-mmc: Improve DMA stability on sh-mobile
mmc: fix mmc_app_send_scr() for dma transfer
mmc: sdhci-esdhc: enable esdhc on imx53
mmc: sdhci-esdhc: use writel/readl as general APIs
mmc: sdhci: add the abort CMDTYPE bits definition
...

+1615 -1396
+3 -3
arch/arm/mach-shmobile/board-ap4evb.c
··· 24 24 #include <linux/irq.h> 25 25 #include <linux/platform_device.h> 26 26 #include <linux/delay.h> 27 - #include <linux/mfd/sh_mobile_sdhi.h> 28 27 #include <linux/mfd/tmio.h> 29 28 #include <linux/mmc/host.h> 29 + #include <linux/mmc/sh_mobile_sdhi.h> 30 30 #include <linux/mtd/mtd.h> 31 31 #include <linux/mtd/partitions.h> 32 32 #include <linux/mtd/physmap.h> ··· 312 312 [0] = { 313 313 .name = "SDHI0", 314 314 .start = 0xe6850000, 315 - .end = 0xe68501ff, 315 + .end = 0xe68500ff, 316 316 .flags = IORESOURCE_MEM, 317 317 }, 318 318 [1] = { ··· 345 345 [0] = { 346 346 .name = "SDHI1", 347 347 .start = 0xe6860000, 348 - .end = 0xe68601ff, 348 + .end = 0xe68600ff, 349 349 .flags = IORESOURCE_MEM, 350 350 }, 351 351 [1] = {
+3 -3
arch/arm/mach-shmobile/board-g4evm.c
··· 31 31 #include <linux/input.h> 32 32 #include <linux/input/sh_keysc.h> 33 33 #include <linux/mmc/host.h> 34 - #include <linux/mfd/sh_mobile_sdhi.h> 34 + #include <linux/mmc/sh_mobile_sdhi.h> 35 35 #include <linux/gpio.h> 36 36 #include <mach/sh7377.h> 37 37 #include <mach/common.h> ··· 205 205 [0] = { 206 206 .name = "SDHI0", 207 207 .start = 0xe6d50000, 208 - .end = 0xe6d501ff, 208 + .end = 0xe6d500ff, 209 209 .flags = IORESOURCE_MEM, 210 210 }, 211 211 [1] = { ··· 232 232 [0] = { 233 233 .name = "SDHI1", 234 234 .start = 0xe6d60000, 235 - .end = 0xe6d601ff, 235 + .end = 0xe6d600ff, 236 236 .flags = IORESOURCE_MEM, 237 237 }, 238 238 [1] = {
+4 -4
arch/arm/mach-shmobile/board-mackerel.c
··· 32 32 #include <linux/io.h> 33 33 #include <linux/i2c.h> 34 34 #include <linux/leds.h> 35 - #include <linux/mfd/sh_mobile_sdhi.h> 36 35 #include <linux/mfd/tmio.h> 37 36 #include <linux/mmc/host.h> 38 37 #include <linux/mmc/sh_mmcif.h> 38 + #include <linux/mmc/sh_mobile_sdhi.h> 39 39 #include <linux/mtd/mtd.h> 40 40 #include <linux/mtd/partitions.h> 41 41 #include <linux/mtd/physmap.h> ··· 690 690 [0] = { 691 691 .name = "SDHI0", 692 692 .start = 0xe6850000, 693 - .end = 0xe68501ff, 693 + .end = 0xe68500ff, 694 694 .flags = IORESOURCE_MEM, 695 695 }, 696 696 [1] = { ··· 725 725 [0] = { 726 726 .name = "SDHI1", 727 727 .start = 0xe6860000, 728 - .end = 0xe68601ff, 728 + .end = 0xe68600ff, 729 729 .flags = IORESOURCE_MEM, 730 730 }, 731 731 [1] = { ··· 768 768 [0] = { 769 769 .name = "SDHI2", 770 770 .start = 0xe6870000, 771 - .end = 0xe68701ff, 771 + .end = 0xe68700ff, 772 772 .flags = IORESOURCE_MEM, 773 773 }, 774 774 [1] = {
+3 -3
arch/sh/boards/mach-ap325rxa/setup.c
··· 14 14 #include <linux/device.h> 15 15 #include <linux/interrupt.h> 16 16 #include <linux/platform_device.h> 17 - #include <linux/mfd/sh_mobile_sdhi.h> 18 17 #include <linux/mmc/host.h> 18 + #include <linux/mmc/sh_mobile_sdhi.h> 19 19 #include <linux/mtd/physmap.h> 20 20 #include <linux/mtd/sh_flctl.h> 21 21 #include <linux/delay.h> ··· 423 423 [0] = { 424 424 .name = "SDHI0", 425 425 .start = 0x04ce0000, 426 - .end = 0x04ce01ff, 426 + .end = 0x04ce00ff, 427 427 .flags = IORESOURCE_MEM, 428 428 }, 429 429 [1] = { ··· 453 453 [0] = { 454 454 .name = "SDHI1", 455 455 .start = 0x04cf0000, 456 - .end = 0x04cf01ff, 456 + .end = 0x04cf00ff, 457 457 .flags = IORESOURCE_MEM, 458 458 }, 459 459 [1] = {
+6 -6
arch/sh/boards/mach-ecovec24/setup.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/device.h> 13 13 #include <linux/platform_device.h> 14 - #include <linux/mfd/sh_mobile_sdhi.h> 15 14 #include <linux/mmc/host.h> 16 15 #include <linux/mmc/sh_mmcif.h> 16 + #include <linux/mmc/sh_mobile_sdhi.h> 17 17 #include <linux/mtd/physmap.h> 18 18 #include <linux/gpio.h> 19 19 #include <linux/interrupt.h> ··· 464 464 .irq = IRQ0, 465 465 }; 466 466 467 - #ifdef CONFIG_MFD_SH_MOBILE_SDHI 467 + #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) 468 468 /* SDHI0 */ 469 469 static void sdhi0_set_pwr(struct platform_device *pdev, int state) 470 470 { ··· 482 482 [0] = { 483 483 .name = "SDHI0", 484 484 .start = 0x04ce0000, 485 - .end = 0x04ce01ff, 485 + .end = 0x04ce00ff, 486 486 .flags = IORESOURCE_MEM, 487 487 }, 488 488 [1] = { ··· 522 522 [0] = { 523 523 .name = "SDHI1", 524 524 .start = 0x04cf0000, 525 - .end = 0x04cf01ff, 525 + .end = 0x04cf00ff, 526 526 .flags = IORESOURCE_MEM, 527 527 }, 528 528 [1] = { ··· 880 880 &ceu0_device, 881 881 &ceu1_device, 882 882 &keysc_device, 883 - #ifdef CONFIG_MFD_SH_MOBILE_SDHI 883 + #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) 884 884 &sdhi0_device, 885 885 #if !defined(CONFIG_MMC_SH_MMCIF) 886 886 &sdhi1_device, ··· 1162 1162 gpio_direction_input(GPIO_PTR5); 1163 1163 gpio_direction_input(GPIO_PTR6); 1164 1164 1165 - #ifdef CONFIG_MFD_SH_MOBILE_SDHI 1165 + #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE) 1166 1166 /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */ 1167 1167 gpio_request(GPIO_FN_SDHI0CD, NULL); 1168 1168 gpio_request(GPIO_FN_SDHI0WP, NULL);
+2 -2
arch/sh/boards/mach-kfr2r09/setup.c
··· 10 10 #include <linux/init.h> 11 11 #include <linux/platform_device.h> 12 12 #include <linux/interrupt.h> 13 - #include <linux/mfd/sh_mobile_sdhi.h> 14 13 #include <linux/mmc/host.h> 14 + #include <linux/mmc/sh_mobile_sdhi.h> 15 15 #include <linux/mfd/tmio.h> 16 16 #include <linux/mtd/physmap.h> 17 17 #include <linux/mtd/onenand.h> ··· 354 354 [0] = { 355 355 .name = "SDHI0", 356 356 .start = 0x04ce0000, 357 - .end = 0x04ce01ff, 357 + .end = 0x04ce00ff, 358 358 .flags = IORESOURCE_MEM, 359 359 }, 360 360 [1] = {
+2 -2
arch/sh/boards/mach-migor/setup.c
··· 12 12 #include <linux/interrupt.h> 13 13 #include <linux/input.h> 14 14 #include <linux/input/sh_keysc.h> 15 - #include <linux/mfd/sh_mobile_sdhi.h> 16 15 #include <linux/mmc/host.h> 16 + #include <linux/mmc/sh_mobile_sdhi.h> 17 17 #include <linux/mtd/physmap.h> 18 18 #include <linux/mtd/nand.h> 19 19 #include <linux/i2c.h> ··· 399 399 [0] = { 400 400 .name = "SDHI", 401 401 .start = 0x04ce0000, 402 - .end = 0x04ce01ff, 402 + .end = 0x04ce00ff, 403 403 .flags = IORESOURCE_MEM, 404 404 }, 405 405 [1] = {
+3 -3
arch/sh/boards/mach-se/7724/setup.c
··· 14 14 #include <linux/device.h> 15 15 #include <linux/interrupt.h> 16 16 #include <linux/platform_device.h> 17 - #include <linux/mfd/sh_mobile_sdhi.h> 18 17 #include <linux/mmc/host.h> 18 + #include <linux/mmc/sh_mobile_sdhi.h> 19 19 #include <linux/mtd/physmap.h> 20 20 #include <linux/delay.h> 21 21 #include <linux/smc91x.h> ··· 456 456 [0] = { 457 457 .name = "SDHI0", 458 458 .start = 0x04ce0000, 459 - .end = 0x04ce01ff, 459 + .end = 0x04ce00ff, 460 460 .flags = IORESOURCE_MEM, 461 461 }, 462 462 [1] = { ··· 488 488 [0] = { 489 489 .name = "SDHI1", 490 490 .start = 0x04cf0000, 491 - .end = 0x04cf01ff, 491 + .end = 0x04cf00ff, 492 492 .flags = IORESOURCE_MEM, 493 493 }, 494 494 [1] = {
-14
drivers/mfd/Kconfig
··· 60 60 This driver supports the ASIC3 multifunction chip found on many 61 61 PDAs (mainly iPAQ and HTC based ones) 62 62 63 - config MFD_SH_MOBILE_SDHI 64 - bool "Support for SuperH Mobile SDHI" 65 - depends on SUPERH || ARCH_SHMOBILE 66 - select MFD_CORE 67 - select TMIO_MMC_DMA 68 - ---help--- 69 - This driver supports the SDHI hardware block found in many 70 - SuperH Mobile SoCs. 71 - 72 63 config MFD_DAVINCI_VOICECODEC 73 64 tristate 74 65 select MFD_CORE ··· 256 265 config MFD_TMIO 257 266 bool 258 267 default n 259 - 260 - config TMIO_MMC_DMA 261 - bool 262 - select DMA_ENGINE 263 - select DMADEVICES 264 268 265 269 config MFD_T7L66XB 266 270 bool "Support Toshiba T7L66XB"
-1
drivers/mfd/Makefile
··· 6 6 obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o 7 7 obj-$(CONFIG_MFD_SM501) += sm501.o 8 8 obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o 9 - obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o 10 9 11 10 obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o 12 11 obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
+33 -62
drivers/mfd/sh_mobile_sdhi.c drivers/mmc/host/sh_mobile_sdhi.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/platform_device.h> 25 25 #include <linux/mmc/host.h> 26 - #include <linux/mfd/core.h> 26 + #include <linux/mmc/sh_mobile_sdhi.h> 27 27 #include <linux/mfd/tmio.h> 28 - #include <linux/mfd/sh_mobile_sdhi.h> 29 28 #include <linux/sh_dma.h> 29 + 30 + #include "tmio_mmc.h" 30 31 31 32 struct sh_mobile_sdhi { 32 33 struct clk *clk; 33 34 struct tmio_mmc_data mmc_data; 34 - struct mfd_cell cell_mmc; 35 35 struct sh_dmae_slave param_tx; 36 36 struct sh_dmae_slave param_rx; 37 37 struct tmio_mmc_dma dma_priv; 38 38 }; 39 39 40 - static struct resource sh_mobile_sdhi_resources[] = { 41 - { 42 - .start = 0x000, 43 - .end = 0x1ff, 44 - .flags = IORESOURCE_MEM, 45 - }, 46 - { 47 - .start = 0, 48 - .end = 0, 49 - .flags = IORESOURCE_IRQ, 50 - }, 51 - }; 52 - 53 - static struct mfd_cell sh_mobile_sdhi_cell = { 54 - .name = "tmio-mmc", 55 - .num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources), 56 - .resources = sh_mobile_sdhi_resources, 57 - }; 58 - 59 - static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state) 40 + static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) 60 41 { 61 - struct platform_device *pdev = to_platform_device(tmio->dev.parent); 62 42 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 63 43 64 44 if (p && p->set_pwr) 65 45 p->set_pwr(pdev, state); 66 46 } 67 47 68 - static int sh_mobile_sdhi_get_cd(struct platform_device *tmio) 48 + static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) 69 49 { 70 - struct platform_device *pdev = to_platform_device(tmio->dev.parent); 71 50 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 72 51 73 52 if (p && p->get_cd) ··· 60 81 struct sh_mobile_sdhi *priv; 61 82 struct tmio_mmc_data *mmc_data; 62 83 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 63 - struct resource *mem; 84 + struct tmio_mmc_host *host; 64 85 char clk_name[8]; 65 - int ret, irq; 66 - 67 - mem = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); 68 - if (!mem) 69 - dev_err(&pdev->dev, "missing MEM resource\n"); 70 - 71 - irq = platform_get_irq(pdev, 0); 72 - if (irq < 0) 73 - dev_err(&pdev->dev, "missing IRQ resource\n"); 74 - 75 - if (!mem || (irq < 0)) 76 - return -EINVAL; 86 + int ret; 77 87 78 88 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); 79 89 if (priv == NULL) { ··· 77 109 if (IS_ERR(priv->clk)) { 78 110 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); 79 111 ret = PTR_ERR(priv->clk); 80 - kfree(priv); 81 - return ret; 112 + goto eclkget; 82 113 } 83 114 84 115 clk_enable(priv->clk); ··· 90 123 mmc_data->flags = p->tmio_flags; 91 124 mmc_data->ocr_mask = p->tmio_ocr_mask; 92 125 mmc_data->capabilities |= p->tmio_caps; 126 + 127 + if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { 128 + priv->param_tx.slave_id = p->dma_slave_tx; 129 + priv->param_rx.slave_id = p->dma_slave_rx; 130 + priv->dma_priv.chan_priv_tx = &priv->param_tx; 131 + priv->dma_priv.chan_priv_rx = &priv->param_rx; 132 + priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ 133 + mmc_data->dma = &priv->dma_priv; 134 + } 93 135 } 94 136 95 137 /* ··· 112 136 */ 113 137 mmc_data->flags |= TMIO_MMC_SDIO_IRQ; 114 138 115 - if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { 116 - priv->param_tx.slave_id = p->dma_slave_tx; 117 - priv->param_rx.slave_id = p->dma_slave_rx; 118 - priv->dma_priv.chan_priv_tx = &priv->param_tx; 119 - priv->dma_priv.chan_priv_rx = &priv->param_rx; 120 - priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ 121 - mmc_data->dma = &priv->dma_priv; 122 - } 139 + ret = tmio_mmc_host_probe(&host, pdev, mmc_data); 140 + if (ret < 0) 141 + goto eprobe; 123 142 124 - memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); 125 - priv->cell_mmc.mfd_data = mmc_data; 143 + pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 144 + (unsigned long)host->ctl, host->irq); 126 145 127 - platform_set_drvdata(pdev, priv); 146 + return ret; 128 147 
129 - ret = mfd_add_devices(&pdev->dev, pdev->id, 130 - &priv->cell_mmc, 1, mem, irq); 131 - if (ret) { 132 - clk_disable(priv->clk); 133 - clk_put(priv->clk); 134 - kfree(priv); 135 - } 136 - 148 + eprobe: 149 + clk_disable(priv->clk); 150 + clk_put(priv->clk); 151 + eclkget: 152 + kfree(priv); 137 153 return ret; 138 154 } 139 155 140 156 static int sh_mobile_sdhi_remove(struct platform_device *pdev) 141 157 { 142 - struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev); 158 + struct mmc_host *mmc = platform_get_drvdata(pdev); 159 + struct tmio_mmc_host *host = mmc_priv(mmc); 160 + struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); 143 161 144 - mfd_remove_devices(&pdev->dev); 162 + tmio_mmc_host_remove(host); 145 163 clk_disable(priv->clk); 146 164 clk_put(priv->clk); 147 165 kfree(priv); ··· 168 198 MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); 169 199 MODULE_AUTHOR("Magnus Damm"); 170 200 MODULE_LICENSE("GPL v2"); 201 + MODULE_ALIAS("platform:sh_mobile_sdhi");
+1 -2
drivers/mmc/card/mmc_test.c
··· 1875 1875 unsigned int tot_sz, int max_scatter) 1876 1876 { 1877 1877 unsigned int dev_addr, i, cnt, sz, ssz; 1878 - struct timespec ts1, ts2, ts; 1878 + struct timespec ts1, ts2; 1879 1879 int ret; 1880 1880 1881 1881 sz = test->area.max_tfr; ··· 1912 1912 } 1913 1913 getnstimeofday(&ts2); 1914 1914 1915 - ts = timespec_sub(ts2, ts1); 1916 1915 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1917 1916 1918 1917 return 0;
+13 -1
drivers/mmc/core/sd_ops.c
··· 9 9 * your option) any later version. 10 10 */ 11 11 12 + #include <linux/slab.h> 12 13 #include <linux/types.h> 13 14 #include <linux/scatterlist.h> 14 15 ··· 253 252 struct mmc_command cmd; 254 253 struct mmc_data data; 255 254 struct scatterlist sg; 255 + void *data_buf; 256 256 257 257 BUG_ON(!card); 258 258 BUG_ON(!card->host); ··· 264 262 err = mmc_app_cmd(card->host, card); 265 263 if (err) 266 264 return err; 265 + 266 + /* dma onto stack is unsafe/nonportable, but callers to this 267 + * routine normally provide temporary on-stack buffers ... 268 + */ 269 + data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL); 270 + if (data_buf == NULL) 271 + return -ENOMEM; 267 272 268 273 memset(&mrq, 0, sizeof(struct mmc_request)); 269 274 memset(&cmd, 0, sizeof(struct mmc_command)); ··· 289 280 data.sg = &sg; 290 281 data.sg_len = 1; 291 282 292 - sg_init_one(&sg, scr, 8); 283 + sg_init_one(&sg, data_buf, 8); 293 284 294 285 mmc_set_data_timeout(&data, card); 295 286 296 287 mmc_wait_for_req(card->host, &mrq); 288 + 289 + memcpy(scr, data_buf, sizeof(card->raw_scr)); 290 + kfree(data_buf); 297 291 298 292 if (cmd.error) 299 293 return cmd.error;
+13 -1
drivers/mmc/host/Kconfig
··· 439 439 To compile this driver as a module, choose M here: the 440 440 module will be called sdricoh_cs. 441 441 442 + config MMC_TMIO_CORE 443 + tristate 444 + 442 445 config MMC_TMIO 443 446 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" 444 - depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI 447 + depends on MFD_TMIO || MFD_ASIC3 448 + select MMC_TMIO_CORE 445 449 help 446 450 This provides support for the SD/MMC cell found in TC6393XB, 447 451 T7L66XB and also HTC ASIC3 452 + 453 + config MMC_SDHI 454 + tristate "SH-Mobile SDHI SD/SDIO controller support" 455 + depends on SUPERH || ARCH_SHMOBILE 456 + select MMC_TMIO_CORE 457 + help 458 + This provides support for the SDHI SD/SDIO controller found in 459 + SuperH and ARM SH-Mobile SoCs 448 460 449 461 config MMC_CB710 450 462 tristate "ENE CB710 MMC/SD Interface support"
+7 -1
drivers/mmc/host/Makefile
··· 29 29 obj-$(CONFIG_MMC_S3C) += s3cmci.o 30 30 obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 31 31 obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 32 - obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 32 + obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o 33 + tmio_mmc_core-y := tmio_mmc_pio.o 34 + ifneq ($(CONFIG_MMC_SDHI),n) 35 + tmio_mmc_core-y += tmio_mmc_dma.o 36 + endif 37 + obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o 38 + obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 33 39 obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 34 40 obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 35 41 obj-$(CONFIG_MMC_DW) += dw_mmc.o
+2 -2
drivers/mmc/host/dw_mmc.c
··· 316 316 317 317 /* Stop the IDMAC running */ 318 318 temp = mci_readl(host, BMOD); 319 - temp &= ~SDMMC_IDMAC_ENABLE; 319 + temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 320 320 mci_writel(host, BMOD, temp); 321 321 } 322 322 ··· 385 385 386 386 /* Enable the IDMAC */ 387 387 temp = mci_readl(host, BMOD); 388 - temp |= SDMMC_IDMAC_ENABLE; 388 + temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 389 389 mci_writel(host, BMOD, temp); 390 390 391 391 /* Start it running */
+12 -1
drivers/mmc/host/mmci.c
··· 68 68 .datalength_bits = 16, 69 69 }; 70 70 71 + static struct variant_data variant_arm_extended_fifo = { 72 + .fifosize = 128 * 4, 73 + .fifohalfsize = 64 * 4, 74 + .datalength_bits = 16, 75 + }; 76 + 71 77 static struct variant_data variant_u300 = { 72 78 .fifosize = 16 * 4, 73 79 .fifohalfsize = 8 * 4, ··· 1283 1277 static struct amba_id mmci_ids[] = { 1284 1278 { 1285 1279 .id = 0x00041180, 1286 - .mask = 0x000fffff, 1280 + .mask = 0xff0fffff, 1287 1281 .data = &variant_arm, 1282 + }, 1283 + { 1284 + .id = 0x01041180, 1285 + .mask = 0xff0fffff, 1286 + .data = &variant_arm_extended_fifo, 1288 1287 }, 1289 1288 { 1290 1289 .id = 0x00041181,
+2
drivers/mmc/host/of_mmc_spi.c
··· 15 15 #include <linux/module.h> 16 16 #include <linux/device.h> 17 17 #include <linux/slab.h> 18 + #include <linux/irq.h> 18 19 #include <linux/gpio.h> 19 20 #include <linux/of.h> 20 21 #include <linux/of_gpio.h> 22 + #include <linux/of_irq.h> 21 23 #include <linux/spi/spi.h> 22 24 #include <linux/spi/mmc_spi.h> 23 25 #include <linux/mmc/core.h>
+79 -7
drivers/mmc/host/sdhci-esdhc-imx.c
··· 16 16 #include <linux/err.h> 17 17 #include <linux/clk.h> 18 18 #include <linux/gpio.h> 19 + #include <linux/slab.h> 19 20 #include <linux/mmc/host.h> 20 21 #include <linux/mmc/sdhci-pltfm.h> 22 + #include <linux/mmc/mmc.h> 23 + #include <linux/mmc/sdio.h> 21 24 #include <mach/hardware.h> 22 25 #include <mach/esdhc.h> 23 26 #include "sdhci.h" 24 27 #include "sdhci-pltfm.h" 25 28 #include "sdhci-esdhc.h" 29 + 30 + /* VENDOR SPEC register */ 31 + #define SDHCI_VENDOR_SPEC 0xC0 32 + #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 33 + 34 + #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) 35 + /* 36 + * The CMDTYPE of the CMD register (offset 0xE) should be set to 37 + * "11" when the STOP CMD12 is issued on imx53 to abort one 38 + * open ended multi-blk IO. Otherwise the TC INT wouldn't 39 + * be generated. 40 + * In exact block transfer, the controller doesn't complete the 41 + * operations automatically as required at the end of the 42 + * transfer and remains on hold if the abort command is not sent. 43 + * As a result, the TC flag is not asserted and SW received timeout 44 + * exeception. Bit1 of Vendor Spec registor is used to fix it. 
45 + */ 46 + #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) 47 + 48 + struct pltfm_imx_data { 49 + int flags; 50 + u32 scratchpad; 51 + }; 26 52 27 53 static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) 28 54 { ··· 60 34 61 35 static u32 esdhc_readl_le(struct sdhci_host *host, int reg) 62 36 { 37 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 38 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 39 + 63 40 /* fake CARD_PRESENT flag on mx25/35 */ 64 41 u32 val = readl(host->ioaddr + reg); 65 42 66 - if (unlikely(reg == SDHCI_PRESENT_STATE)) { 43 + if (unlikely((reg == SDHCI_PRESENT_STATE) 44 + && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { 67 45 struct esdhc_platform_data *boarddata = 68 46 host->mmc->parent->platform_data; 69 47 ··· 85 55 86 56 static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) 87 57 { 88 - if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) 58 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 59 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 60 + 61 + if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) 62 + && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) 89 63 /* 90 64 * these interrupts won't work with a custom card_detect gpio 91 65 * (only applied to mx25/35) 92 66 */ 93 67 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 68 + 69 + if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 70 + && (reg == SDHCI_INT_STATUS) 71 + && (val & SDHCI_INT_DATA_END))) { 72 + u32 v; 73 + v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); 74 + v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; 75 + writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); 76 + } 94 77 95 78 writel(val, host->ioaddr + reg); 96 79 } ··· 119 76 static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) 120 77 { 121 78 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 79 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 122 80 123 81 switch (reg) { 124 82 
case SDHCI_TRANSFER_MODE: ··· 127 83 * Postpone this write, we must do it together with a 128 84 * command write that is down below. 129 85 */ 130 - pltfm_host->scratchpad = val; 86 + if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 87 + && (host->cmd->opcode == SD_IO_RW_EXTENDED) 88 + && (host->cmd->data->blocks > 1) 89 + && (host->cmd->data->flags & MMC_DATA_READ)) { 90 + u32 v; 91 + v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); 92 + v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; 93 + writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); 94 + } 95 + imx_data->scratchpad = val; 131 96 return; 132 97 case SDHCI_COMMAND: 133 - writel(val << 16 | pltfm_host->scratchpad, 98 + if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) 99 + && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) 100 + val |= SDHCI_CMD_ABORTCMD; 101 + writel(val << 16 | imx_data->scratchpad, 134 102 host->ioaddr + SDHCI_TRANSFER_MODE); 135 103 return; 136 104 case SDHCI_BLOCK_SIZE: ··· 202 146 } 203 147 204 148 static struct sdhci_ops sdhci_esdhc_ops = { 149 + .read_l = esdhc_readl_le, 205 150 .read_w = esdhc_readw_le, 151 + .write_l = esdhc_writel_le, 206 152 .write_w = esdhc_writew_le, 207 153 .write_b = esdhc_writeb_le, 208 154 .set_clock = esdhc_set_clock, ··· 226 168 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; 227 169 struct clk *clk; 228 170 int err; 171 + struct pltfm_imx_data *imx_data; 229 172 230 173 clk = clk_get(mmc_dev(host->mmc), NULL); 231 174 if (IS_ERR(clk)) { ··· 236 177 clk_enable(clk); 237 178 pltfm_host->clk = clk; 238 179 239 - if (cpu_is_mx35() || cpu_is_mx51()) 180 + imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); 181 + if (!imx_data) { 182 + clk_disable(pltfm_host->clk); 183 + clk_put(pltfm_host->clk); 184 + return -ENOMEM; 185 + } 186 + pltfm_host->priv = imx_data; 187 + 188 + if (!cpu_is_mx25()) 240 189 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 241 190 242 191 if (cpu_is_mx25() || cpu_is_mx35()) { ··· 253 186 /* write_protect can't be routed to 
controller, use gpio */ 254 187 sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; 255 188 } 189 + 190 + if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) 191 + imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; 256 192 257 193 if (boarddata) { 258 194 err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); ··· 284 214 goto no_card_detect_irq; 285 215 } 286 216 287 - sdhci_esdhc_ops.write_l = esdhc_writel_le; 288 - sdhci_esdhc_ops.read_l = esdhc_readl_le; 217 + imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; 289 218 /* Now we have a working card_detect again */ 290 219 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 291 220 } ··· 296 227 no_card_detect_pin: 297 228 boarddata->cd_gpio = err; 298 229 not_supported: 230 + kfree(imx_data); 299 231 return 0; 300 232 } 301 233 ··· 304 234 { 305 235 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 306 236 struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; 237 + struct pltfm_imx_data *imx_data = pltfm_host->priv; 307 238 308 239 if (boarddata && gpio_is_valid(boarddata->wp_gpio)) 309 240 gpio_free(boarddata->wp_gpio); ··· 318 247 319 248 clk_disable(pltfm_host->clk); 320 249 clk_put(pltfm_host->clk); 250 + kfree(imx_data); 321 251 } 322 252 323 253 struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+1 -2
drivers/mmc/host/sdhci-esdhc.h
··· 23 23 SDHCI_QUIRK_NONSTANDARD_CLOCK | \ 24 24 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ 25 25 SDHCI_QUIRK_PIO_NEEDS_DELAY | \ 26 - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ 27 - SDHCI_QUIRK_NO_CARD_NO_RESET) 26 + SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 28 27 29 28 #define ESDHC_SYSTEM_CONTROL 0x2c 30 29 #define ESDHC_CLOCK_MASK 0x0000fff0
+2 -1
drivers/mmc/host/sdhci-of-esdhc.c
··· 74 74 75 75 struct sdhci_of_data sdhci_esdhc = { 76 76 /* card detection could be handled via GPIO */ 77 - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION, 77 + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION 78 + | SDHCI_QUIRK_NO_CARD_NO_RESET, 78 79 .ops = { 79 80 .read_l = sdhci_be32bs_readl, 80 81 .read_w = esdhc_readw,
+2 -4
drivers/mmc/host/sdhci-pci.c
··· 1016 1016 struct sdhci_pci_chip *chip; 1017 1017 struct sdhci_pci_slot *slot; 1018 1018 1019 - u8 slots, rev, first_bar; 1019 + u8 slots, first_bar; 1020 1020 int ret, i; 1021 1021 1022 1022 BUG_ON(pdev == NULL); 1023 1023 BUG_ON(ent == NULL); 1024 1024 1025 - pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); 1026 - 1027 1025 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", 1028 - (int)pdev->vendor, (int)pdev->device, (int)rev); 1026 + (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); 1029 1027 1030 1028 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); 1031 1029 if (ret)
+1 -1
drivers/mmc/host/sdhci-pltfm.h
··· 17 17 18 18 struct sdhci_pltfm_host { 19 19 struct clk *clk; 20 - u32 scratchpad; /* to handle quirks across io-accessor calls */ 20 + void *priv; /* to handle quirks across io-accessor calls */ 21 21 }; 22 22 23 23 extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
+1
drivers/mmc/host/sdhci.h
··· 45 45 #define SDHCI_CMD_CRC 0x08 46 46 #define SDHCI_CMD_INDEX 0x10 47 47 #define SDHCI_CMD_DATA 0x20 48 + #define SDHCI_CMD_ABORTCMD 0xC0 48 49 49 50 #define SDHCI_CMD_RESP_NONE 0x00 50 51 #define SDHCI_CMD_RESP_LONG 0x01
+18 -1267
drivers/mmc/host/tmio_mmc.c
··· 1 1 /* 2 - * linux/drivers/mmc/tmio_mmc.c 2 + * linux/drivers/mmc/host/tmio_mmc.c 3 3 * 4 - * Copyright (C) 2004 Ian Molton 5 - * Copyright (C) 2007 Ian Molton 4 + * Copyright (C) 2007 Ian Molton 5 + * Copyright (C) 2004 Ian Molton 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify 8 8 * it under the terms of the GNU General Public License version 2 as ··· 11 11 * Driver for the MMC / SD / SDIO cell found in: 12 12 * 13 13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 14 - * 15 - * This driver draws mainly on scattered spec sheets, Reverse engineering 16 - * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit 17 - * support). (Further 4 bit support from a later datasheet). 18 - * 19 - * TODO: 20 - * Investigate using a workqueue for PIO transfers 21 - * Eliminate FIXMEs 22 - * SDIO support 23 - * Better Power management 24 - * Handle MMC errors better 25 - * double buffer support 26 - * 27 14 */ 28 15 29 - #include <linux/delay.h> 30 16 #include <linux/device.h> 31 - #include <linux/dmaengine.h> 32 - #include <linux/highmem.h> 33 - #include <linux/interrupt.h> 34 - #include <linux/io.h> 35 - #include <linux/irq.h> 36 17 #include <linux/mfd/core.h> 37 18 #include <linux/mfd/tmio.h> 38 19 #include <linux/mmc/host.h> 39 20 #include <linux/module.h> 40 21 #include <linux/pagemap.h> 41 22 #include <linux/scatterlist.h> 42 - #include <linux/workqueue.h> 43 - #include <linux/spinlock.h> 44 23 45 - #define CTL_SD_CMD 0x00 46 - #define CTL_ARG_REG 0x04 47 - #define CTL_STOP_INTERNAL_ACTION 0x08 48 - #define CTL_XFER_BLK_COUNT 0xa 49 - #define CTL_RESPONSE 0x0c 50 - #define CTL_STATUS 0x1c 51 - #define CTL_IRQ_MASK 0x20 52 - #define CTL_SD_CARD_CLK_CTL 0x24 53 - #define CTL_SD_XFER_LEN 0x26 54 - #define CTL_SD_MEM_CARD_OPT 0x28 55 - #define CTL_SD_ERROR_DETAIL_STATUS 0x2c 56 - #define CTL_SD_DATA_PORT 0x30 57 - #define CTL_TRANSACTION_CTL 0x34 58 - #define CTL_SDIO_STATUS 0x36 59 - #define CTL_SDIO_IRQ_MASK 0x38 60 - 
#define CTL_RESET_SD 0xe0 61 - #define CTL_SDIO_REGS 0x100 62 - #define CTL_CLK_AND_WAIT_CTL 0x138 63 - #define CTL_RESET_SDIO 0x1e0 64 - 65 - /* Definitions for values the CTRL_STATUS register can take. */ 66 - #define TMIO_STAT_CMDRESPEND 0x00000001 67 - #define TMIO_STAT_DATAEND 0x00000004 68 - #define TMIO_STAT_CARD_REMOVE 0x00000008 69 - #define TMIO_STAT_CARD_INSERT 0x00000010 70 - #define TMIO_STAT_SIGSTATE 0x00000020 71 - #define TMIO_STAT_WRPROTECT 0x00000080 72 - #define TMIO_STAT_CARD_REMOVE_A 0x00000100 73 - #define TMIO_STAT_CARD_INSERT_A 0x00000200 74 - #define TMIO_STAT_SIGSTATE_A 0x00000400 75 - #define TMIO_STAT_CMD_IDX_ERR 0x00010000 76 - #define TMIO_STAT_CRCFAIL 0x00020000 77 - #define TMIO_STAT_STOPBIT_ERR 0x00040000 78 - #define TMIO_STAT_DATATIMEOUT 0x00080000 79 - #define TMIO_STAT_RXOVERFLOW 0x00100000 80 - #define TMIO_STAT_TXUNDERRUN 0x00200000 81 - #define TMIO_STAT_CMDTIMEOUT 0x00400000 82 - #define TMIO_STAT_RXRDY 0x01000000 83 - #define TMIO_STAT_TXRQ 0x02000000 84 - #define TMIO_STAT_ILL_FUNC 0x20000000 85 - #define TMIO_STAT_CMD_BUSY 0x40000000 86 - #define TMIO_STAT_ILL_ACCESS 0x80000000 87 - 88 - /* Definitions for values the CTRL_SDIO_STATUS register can take. 
*/ 89 - #define TMIO_SDIO_STAT_IOIRQ 0x0001 90 - #define TMIO_SDIO_STAT_EXPUB52 0x4000 91 - #define TMIO_SDIO_STAT_EXWT 0x8000 92 - #define TMIO_SDIO_MASK_ALL 0xc007 93 - 94 - /* Define some IRQ masks */ 95 - /* This is the mask used at reset by the chip */ 96 - #define TMIO_MASK_ALL 0x837f031d 97 - #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) 98 - #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) 99 - #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ 100 - TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 101 - #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 102 - 103 - #define enable_mmc_irqs(host, i) \ 104 - do { \ 105 - u32 mask;\ 106 - mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ 107 - mask &= ~((i) & TMIO_MASK_IRQ); \ 108 - sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ 109 - } while (0) 110 - 111 - #define disable_mmc_irqs(host, i) \ 112 - do { \ 113 - u32 mask;\ 114 - mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \ 115 - mask |= ((i) & TMIO_MASK_IRQ); \ 116 - sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ 117 - } while (0) 118 - 119 - #define ack_mmc_irqs(host, i) \ 120 - do { \ 121 - sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ 122 - } while (0) 123 - 124 - /* This is arbitrary, just noone needed any higher alignment yet */ 125 - #define MAX_ALIGN 4 126 - 127 - struct tmio_mmc_host { 128 - void __iomem *ctl; 129 - unsigned long bus_shift; 130 - struct mmc_command *cmd; 131 - struct mmc_request *mrq; 132 - struct mmc_data *data; 133 - struct mmc_host *mmc; 134 - int irq; 135 - unsigned int sdio_irq_enabled; 136 - 137 - /* Callbacks for clock / power control */ 138 - void (*set_pwr)(struct platform_device *host, int state); 139 - void (*set_clk_div)(struct platform_device *host, int state); 140 - 141 - /* pio related stuff */ 142 - struct scatterlist *sg_ptr; 143 - struct scatterlist *sg_orig; 144 - unsigned int sg_len; 145 - unsigned int sg_off; 146 - 147 - struct platform_device 
*pdev; 148 - 149 - /* DMA support */ 150 - struct dma_chan *chan_rx; 151 - struct dma_chan *chan_tx; 152 - struct tasklet_struct dma_complete; 153 - struct tasklet_struct dma_issue; 154 - #ifdef CONFIG_TMIO_MMC_DMA 155 - u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN))); 156 - struct scatterlist bounce_sg; 157 - #endif 158 - 159 - /* Track lost interrupts */ 160 - struct delayed_work delayed_reset_work; 161 - spinlock_t lock; 162 - unsigned long last_req_ts; 163 - }; 164 - 165 - static void tmio_check_bounce_buffer(struct tmio_mmc_host *host); 166 - 167 - static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) 168 - { 169 - return readw(host->ctl + (addr << host->bus_shift)); 170 - } 171 - 172 - static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, 173 - u16 *buf, int count) 174 - { 175 - readsw(host->ctl + (addr << host->bus_shift), buf, count); 176 - } 177 - 178 - static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) 179 - { 180 - return readw(host->ctl + (addr << host->bus_shift)) | 181 - readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; 182 - } 183 - 184 - static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) 185 - { 186 - writew(val, host->ctl + (addr << host->bus_shift)); 187 - } 188 - 189 - static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, 190 - u16 *buf, int count) 191 - { 192 - writesw(host->ctl + (addr << host->bus_shift), buf, count); 193 - } 194 - 195 - static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) 196 - { 197 - writew(val, host->ctl + (addr << host->bus_shift)); 198 - writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 199 - } 200 - 201 - static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) 202 - { 203 - host->sg_len = data->sg_len; 204 - host->sg_ptr = data->sg; 205 - host->sg_orig = data->sg; 206 - host->sg_off = 0; 207 - } 208 - 209 - static int tmio_mmc_next_sg(struct tmio_mmc_host 
*host) 210 - { 211 - host->sg_ptr = sg_next(host->sg_ptr); 212 - host->sg_off = 0; 213 - return --host->sg_len; 214 - } 215 - 216 - static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 217 - { 218 - local_irq_save(*flags); 219 - return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 220 - } 221 - 222 - static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt) 223 - { 224 - kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); 225 - local_irq_restore(*flags); 226 - } 227 - 228 - #ifdef CONFIG_MMC_DEBUG 229 - 230 - #define STATUS_TO_TEXT(a, status, i) \ 231 - do { \ 232 - if (status & TMIO_STAT_##a) { \ 233 - if (i++) \ 234 - printk(" | "); \ 235 - printk(#a); \ 236 - } \ 237 - } while (0) 238 - 239 - void pr_debug_status(u32 status) 240 - { 241 - int i = 0; 242 - printk(KERN_DEBUG "status: %08x = ", status); 243 - STATUS_TO_TEXT(CARD_REMOVE, status, i); 244 - STATUS_TO_TEXT(CARD_INSERT, status, i); 245 - STATUS_TO_TEXT(SIGSTATE, status, i); 246 - STATUS_TO_TEXT(WRPROTECT, status, i); 247 - STATUS_TO_TEXT(CARD_REMOVE_A, status, i); 248 - STATUS_TO_TEXT(CARD_INSERT_A, status, i); 249 - STATUS_TO_TEXT(SIGSTATE_A, status, i); 250 - STATUS_TO_TEXT(CMD_IDX_ERR, status, i); 251 - STATUS_TO_TEXT(STOPBIT_ERR, status, i); 252 - STATUS_TO_TEXT(ILL_FUNC, status, i); 253 - STATUS_TO_TEXT(CMD_BUSY, status, i); 254 - STATUS_TO_TEXT(CMDRESPEND, status, i); 255 - STATUS_TO_TEXT(DATAEND, status, i); 256 - STATUS_TO_TEXT(CRCFAIL, status, i); 257 - STATUS_TO_TEXT(DATATIMEOUT, status, i); 258 - STATUS_TO_TEXT(CMDTIMEOUT, status, i); 259 - STATUS_TO_TEXT(RXOVERFLOW, status, i); 260 - STATUS_TO_TEXT(TXUNDERRUN, status, i); 261 - STATUS_TO_TEXT(RXRDY, status, i); 262 - STATUS_TO_TEXT(TXRQ, status, i); 263 - STATUS_TO_TEXT(ILL_ACCESS, status, i); 264 - printk("\n"); 265 - } 266 - 267 - #else 268 - #define pr_debug_status(s) do { } while (0) 269 - #endif 270 - 271 - static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int 
enable) 272 - { 273 - struct tmio_mmc_host *host = mmc_priv(mmc); 274 - 275 - if (enable) { 276 - host->sdio_irq_enabled = 1; 277 - sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); 278 - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, 279 - (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); 280 - } else { 281 - sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); 282 - sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 283 - host->sdio_irq_enabled = 0; 284 - } 285 - } 286 - 287 - static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 288 - { 289 - u32 clk = 0, clock; 290 - 291 - if (new_clock) { 292 - for (clock = host->mmc->f_min, clk = 0x80000080; 293 - new_clock >= (clock<<1); clk >>= 1) 294 - clock <<= 1; 295 - clk |= 0x100; 296 - } 297 - 298 - if (host->set_clk_div) 299 - host->set_clk_div(host->pdev, (clk>>22) & 1); 300 - 301 - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); 302 - } 303 - 304 - static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 305 - { 306 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 307 - 308 - /* 309 - * Testing on sh-mobile showed that SDIO IRQs are unmasked when 310 - * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the 311 - * device IRQ here and restore the SDIO IRQ mask before 312 - * re-enabling the device IRQ. 
313 - */ 314 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) 315 - disable_irq(host->irq); 316 - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); 317 - msleep(10); 318 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) { 319 - tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); 320 - enable_irq(host->irq); 321 - } 322 - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & 323 - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 324 - msleep(10); 325 - } 326 - 327 - static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 328 - { 329 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 330 - 331 - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | 332 - sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 333 - msleep(10); 334 - /* see comment in tmio_mmc_clk_stop above */ 335 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) 336 - disable_irq(host->irq); 337 - sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); 338 - msleep(10); 339 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) { 340 - tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled); 341 - enable_irq(host->irq); 342 - } 343 - } 344 - 345 - static void reset(struct tmio_mmc_host *host) 346 - { 347 - /* FIXME - should we set stop clock reg here */ 348 - sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); 349 - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); 350 - msleep(10); 351 - sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); 352 - sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); 353 - msleep(10); 354 - } 355 - 356 - static void tmio_mmc_reset_work(struct work_struct *work) 357 - { 358 - struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, 359 - delayed_reset_work.work); 360 - struct mmc_request *mrq; 361 - unsigned long flags; 362 - 363 - spin_lock_irqsave(&host->lock, flags); 364 - mrq = host->mrq; 365 - 366 - /* request already finished */ 367 - if (!mrq 368 - || time_is_after_jiffies(host->last_req_ts + 369 - msecs_to_jiffies(2000))) { 370 - spin_unlock_irqrestore(&host->lock, flags); 371 - return; 372 - } 373 - 374 - 
dev_warn(&host->pdev->dev, 375 - "timeout waiting for hardware interrupt (CMD%u)\n", 376 - mrq->cmd->opcode); 377 - 378 - if (host->data) 379 - host->data->error = -ETIMEDOUT; 380 - else if (host->cmd) 381 - host->cmd->error = -ETIMEDOUT; 382 - else 383 - mrq->cmd->error = -ETIMEDOUT; 384 - 385 - host->cmd = NULL; 386 - host->data = NULL; 387 - host->mrq = NULL; 388 - 389 - spin_unlock_irqrestore(&host->lock, flags); 390 - 391 - reset(host); 392 - 393 - mmc_request_done(host->mmc, mrq); 394 - } 395 - 396 - static void 397 - tmio_mmc_finish_request(struct tmio_mmc_host *host) 398 - { 399 - struct mmc_request *mrq = host->mrq; 400 - 401 - if (!mrq) 402 - return; 403 - 404 - host->mrq = NULL; 405 - host->cmd = NULL; 406 - host->data = NULL; 407 - 408 - cancel_delayed_work(&host->delayed_reset_work); 409 - 410 - mmc_request_done(host->mmc, mrq); 411 - } 412 - 413 - /* These are the bitmasks the tmio chip requires to implement the MMC response 414 - * types. Note that R1 and R6 are the same in this scheme. 
*/ 415 - #define APP_CMD 0x0040 416 - #define RESP_NONE 0x0300 417 - #define RESP_R1 0x0400 418 - #define RESP_R1B 0x0500 419 - #define RESP_R2 0x0600 420 - #define RESP_R3 0x0700 421 - #define DATA_PRESENT 0x0800 422 - #define TRANSFER_READ 0x1000 423 - #define TRANSFER_MULTI 0x2000 424 - #define SECURITY_CMD 0x4000 425 - 426 - static int 427 - tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) 428 - { 429 - struct mmc_data *data = host->data; 430 - int c = cmd->opcode; 431 - 432 - /* Command 12 is handled by hardware */ 433 - if (cmd->opcode == 12 && !cmd->arg) { 434 - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); 435 - return 0; 436 - } 437 - 438 - switch (mmc_resp_type(cmd)) { 439 - case MMC_RSP_NONE: c |= RESP_NONE; break; 440 - case MMC_RSP_R1: c |= RESP_R1; break; 441 - case MMC_RSP_R1B: c |= RESP_R1B; break; 442 - case MMC_RSP_R2: c |= RESP_R2; break; 443 - case MMC_RSP_R3: c |= RESP_R3; break; 444 - default: 445 - pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); 446 - return -EINVAL; 447 - } 448 - 449 - host->cmd = cmd; 450 - 451 - /* FIXME - this seems to be ok commented out but the spec suggest this bit 452 - * should be set when issuing app commands. 453 - * if(cmd->flags & MMC_FLAG_ACMD) 454 - * c |= APP_CMD; 455 - */ 456 - if (data) { 457 - c |= DATA_PRESENT; 458 - if (data->blocks > 1) { 459 - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); 460 - c |= TRANSFER_MULTI; 461 - } 462 - if (data->flags & MMC_DATA_READ) 463 - c |= TRANSFER_READ; 464 - } 465 - 466 - enable_mmc_irqs(host, TMIO_MASK_CMD); 467 - 468 - /* Fire off the command */ 469 - sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); 470 - sd_ctrl_write16(host, CTL_SD_CMD, c); 471 - 472 - return 0; 473 - } 474 - 475 - /* 476 - * This chip always returns (at least?) as much data as you ask for. 477 - * I'm unsure what happens if you ask for less than a block. 
This should be 478 - * looked into to ensure that a funny length read doesnt hose the controller. 479 - */ 480 - static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 481 - { 482 - struct mmc_data *data = host->data; 483 - void *sg_virt; 484 - unsigned short *buf; 485 - unsigned int count; 486 - unsigned long flags; 487 - 488 - if (!data) { 489 - pr_debug("Spurious PIO IRQ\n"); 490 - return; 491 - } 492 - 493 - sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); 494 - buf = (unsigned short *)(sg_virt + host->sg_off); 495 - 496 - count = host->sg_ptr->length - host->sg_off; 497 - if (count > data->blksz) 498 - count = data->blksz; 499 - 500 - pr_debug("count: %08x offset: %08x flags %08x\n", 501 - count, host->sg_off, data->flags); 502 - 503 - /* Transfer the data */ 504 - if (data->flags & MMC_DATA_READ) 505 - sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 506 - else 507 - sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 508 - 509 - host->sg_off += count; 510 - 511 - tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); 512 - 513 - if (host->sg_off == host->sg_ptr->length) 514 - tmio_mmc_next_sg(host); 515 - 516 - return; 517 - } 518 - 519 - /* needs to be called with host->lock held */ 520 - static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) 521 - { 522 - struct mmc_data *data = host->data; 523 - struct mmc_command *stop; 524 - 525 - host->data = NULL; 526 - 527 - if (!data) { 528 - dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); 529 - return; 530 - } 531 - stop = data->stop; 532 - 533 - /* FIXME - return correct transfer count on errors */ 534 - if (!data->error) 535 - data->bytes_xfered = data->blocks * data->blksz; 536 - else 537 - data->bytes_xfered = 0; 538 - 539 - pr_debug("Completed data request\n"); 540 - 541 - /* 542 - * FIXME: other drivers allow an optional stop command of any given type 543 - * which we dont do, as the chip can auto generate them. 
544 - * Perhaps we can be smarter about when to use auto CMD12 and 545 - * only issue the auto request when we know this is the desired 546 - * stop command, allowing fallback to the stop command the 547 - * upper layers expect. For now, we do what works. 548 - */ 549 - 550 - if (data->flags & MMC_DATA_READ) { 551 - if (!host->chan_rx) 552 - disable_mmc_irqs(host, TMIO_MASK_READOP); 553 - else 554 - tmio_check_bounce_buffer(host); 555 - dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", 556 - host->mrq); 557 - } else { 558 - if (!host->chan_tx) 559 - disable_mmc_irqs(host, TMIO_MASK_WRITEOP); 560 - dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", 561 - host->mrq); 562 - } 563 - 564 - if (stop) { 565 - if (stop->opcode == 12 && !stop->arg) 566 - sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); 567 - else 568 - BUG(); 569 - } 570 - 571 - tmio_mmc_finish_request(host); 572 - } 573 - 574 - static void tmio_mmc_data_irq(struct tmio_mmc_host *host) 575 - { 576 - struct mmc_data *data; 577 - spin_lock(&host->lock); 578 - data = host->data; 579 - 580 - if (!data) 581 - goto out; 582 - 583 - if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { 584 - /* 585 - * Has all data been written out yet? Testing on SuperH showed, 586 - * that in most cases the first interrupt comes already with the 587 - * BUSY status bit clear, but on some operations, like mount or 588 - * in the beginning of a write / sync / umount, there is one 589 - * DATAEND interrupt with the BUSY bit set, in this cases 590 - * waiting for one more interrupt fixes the problem. 
591 - */ 592 - if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { 593 - disable_mmc_irqs(host, TMIO_STAT_DATAEND); 594 - tasklet_schedule(&host->dma_complete); 595 - } 596 - } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { 597 - disable_mmc_irqs(host, TMIO_STAT_DATAEND); 598 - tasklet_schedule(&host->dma_complete); 599 - } else { 600 - tmio_mmc_do_data_irq(host); 601 - } 602 - out: 603 - spin_unlock(&host->lock); 604 - } 605 - 606 - static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 607 - unsigned int stat) 608 - { 609 - struct mmc_command *cmd = host->cmd; 610 - int i, addr; 611 - 612 - spin_lock(&host->lock); 613 - 614 - if (!host->cmd) { 615 - pr_debug("Spurious CMD irq\n"); 616 - goto out; 617 - } 618 - 619 - host->cmd = NULL; 620 - 621 - /* This controller is sicker than the PXA one. Not only do we need to 622 - * drop the top 8 bits of the first response word, we also need to 623 - * modify the order of the response for short response command types. 624 - */ 625 - 626 - for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) 627 - cmd->resp[i] = sd_ctrl_read32(host, addr); 628 - 629 - if (cmd->flags & MMC_RSP_136) { 630 - cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); 631 - cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); 632 - cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); 633 - cmd->resp[3] <<= 8; 634 - } else if (cmd->flags & MMC_RSP_R3) { 635 - cmd->resp[0] = cmd->resp[3]; 636 - } 637 - 638 - if (stat & TMIO_STAT_CMDTIMEOUT) 639 - cmd->error = -ETIMEDOUT; 640 - else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) 641 - cmd->error = -EILSEQ; 642 - 643 - /* If there is data to handle we enable data IRQs here, and 644 - * we will ultimatley finish the request in the data_end handler. 645 - * If theres no data or we encountered an error, finish now. 
646 - */ 647 - if (host->data && !cmd->error) { 648 - if (host->data->flags & MMC_DATA_READ) { 649 - if (!host->chan_rx) 650 - enable_mmc_irqs(host, TMIO_MASK_READOP); 651 - } else { 652 - if (!host->chan_tx) 653 - enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 654 - else 655 - tasklet_schedule(&host->dma_issue); 656 - } 657 - } else { 658 - tmio_mmc_finish_request(host); 659 - } 660 - 661 - out: 662 - spin_unlock(&host->lock); 663 - 664 - return; 665 - } 666 - 667 - static irqreturn_t tmio_mmc_irq(int irq, void *devid) 668 - { 669 - struct tmio_mmc_host *host = devid; 670 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 671 - unsigned int ireg, irq_mask, status; 672 - unsigned int sdio_ireg, sdio_irq_mask, sdio_status; 673 - 674 - pr_debug("MMC IRQ begin\n"); 675 - 676 - status = sd_ctrl_read32(host, CTL_STATUS); 677 - irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 678 - ireg = status & TMIO_MASK_IRQ & ~irq_mask; 679 - 680 - sdio_ireg = 0; 681 - if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { 682 - sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); 683 - sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); 684 - sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; 685 - 686 - sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); 687 - 688 - if (sdio_ireg && !host->sdio_irq_enabled) { 689 - pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n", 690 - sdio_status, sdio_irq_mask, sdio_ireg); 691 - tmio_mmc_enable_sdio_irq(host->mmc, 0); 692 - goto out; 693 - } 694 - 695 - if (host->mmc->caps & MMC_CAP_SDIO_IRQ && 696 - sdio_ireg & TMIO_SDIO_STAT_IOIRQ) 697 - mmc_signal_sdio_irq(host->mmc); 698 - 699 - if (sdio_ireg) 700 - goto out; 701 - } 702 - 703 - pr_debug_status(status); 704 - pr_debug_status(ireg); 705 - 706 - if (!ireg) { 707 - disable_mmc_irqs(host, status & ~irq_mask); 708 - 709 - pr_warning("tmio_mmc: Spurious irq, disabling! 
" 710 - "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 711 - pr_debug_status(status); 712 - 713 - goto out; 714 - } 715 - 716 - while (ireg) { 717 - /* Card insert / remove attempts */ 718 - if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { 719 - ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | 720 - TMIO_STAT_CARD_REMOVE); 721 - mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 722 - } 723 - 724 - /* CRC and other errors */ 725 - /* if (ireg & TMIO_STAT_ERR_IRQ) 726 - * handled |= tmio_error_irq(host, irq, stat); 727 - */ 728 - 729 - /* Command completion */ 730 - if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { 731 - ack_mmc_irqs(host, 732 - TMIO_STAT_CMDRESPEND | 733 - TMIO_STAT_CMDTIMEOUT); 734 - tmio_mmc_cmd_irq(host, status); 735 - } 736 - 737 - /* Data transfer */ 738 - if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { 739 - ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); 740 - tmio_mmc_pio_irq(host); 741 - } 742 - 743 - /* Data transfer completion */ 744 - if (ireg & TMIO_STAT_DATAEND) { 745 - ack_mmc_irqs(host, TMIO_STAT_DATAEND); 746 - tmio_mmc_data_irq(host); 747 - } 748 - 749 - /* Check status - keep going until we've handled it all */ 750 - status = sd_ctrl_read32(host, CTL_STATUS); 751 - irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 752 - ireg = status & TMIO_MASK_IRQ & ~irq_mask; 753 - 754 - pr_debug("Status at end of loop: %08x\n", status); 755 - pr_debug_status(status); 756 - } 757 - pr_debug("MMC IRQ end\n"); 758 - 759 - out: 760 - return IRQ_HANDLED; 761 - } 762 - 763 - #ifdef CONFIG_TMIO_MMC_DMA 764 - static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) 765 - { 766 - if (host->sg_ptr == &host->bounce_sg) { 767 - unsigned long flags; 768 - void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); 769 - memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); 770 - tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); 771 - } 772 - } 773 - 774 - static void tmio_mmc_enable_dma(struct 
tmio_mmc_host *host, bool enable) 775 - { 776 - #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) 777 - /* Switch DMA mode on or off - SuperH specific? */ 778 - sd_ctrl_write16(host, 0xd8, enable ? 2 : 0); 779 - #endif 780 - } 781 - 782 - static void tmio_dma_complete(void *arg) 783 - { 784 - struct tmio_mmc_host *host = arg; 785 - 786 - dev_dbg(&host->pdev->dev, "Command completed\n"); 787 - 788 - if (!host->data) 789 - dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); 790 - else 791 - enable_mmc_irqs(host, TMIO_STAT_DATAEND); 792 - } 793 - 794 - static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) 795 - { 796 - struct scatterlist *sg = host->sg_ptr, *sg_tmp; 797 - struct dma_async_tx_descriptor *desc = NULL; 798 - struct dma_chan *chan = host->chan_rx; 799 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 800 - dma_cookie_t cookie; 801 - int ret, i; 802 - bool aligned = true, multiple = true; 803 - unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 804 - 805 - for_each_sg(sg, sg_tmp, host->sg_len, i) { 806 - if (sg_tmp->offset & align) 807 - aligned = false; 808 - if (sg_tmp->length & align) { 809 - multiple = false; 810 - break; 811 - } 812 - } 813 - 814 - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 815 - align >= MAX_ALIGN)) || !multiple) { 816 - ret = -EINVAL; 817 - goto pio; 818 - } 819 - 820 - /* The only sg element can be unaligned, use our bounce buffer then */ 821 - if (!aligned) { 822 - sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 823 - host->sg_ptr = &host->bounce_sg; 824 - sg = host->sg_ptr; 825 - } 826 - 827 - ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 828 - if (ret > 0) 829 - desc = chan->device->device_prep_slave_sg(chan, sg, ret, 830 - DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 831 - 832 - if (desc) { 833 - desc->callback = tmio_dma_complete; 834 - desc->callback_param = host; 835 - cookie = dmaengine_submit(desc); 836 - 
dma_async_issue_pending(chan); 837 - } 838 - dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 839 - __func__, host->sg_len, ret, cookie, host->mrq); 840 - 841 - pio: 842 - if (!desc) { 843 - /* DMA failed, fall back to PIO */ 844 - if (ret >= 0) 845 - ret = -EIO; 846 - host->chan_rx = NULL; 847 - dma_release_channel(chan); 848 - /* Free the Tx channel too */ 849 - chan = host->chan_tx; 850 - if (chan) { 851 - host->chan_tx = NULL; 852 - dma_release_channel(chan); 853 - } 854 - dev_warn(&host->pdev->dev, 855 - "DMA failed: %d, falling back to PIO\n", ret); 856 - tmio_mmc_enable_dma(host, false); 857 - } 858 - 859 - dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, 860 - desc, cookie, host->sg_len); 861 - } 862 - 863 - static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) 864 - { 865 - struct scatterlist *sg = host->sg_ptr, *sg_tmp; 866 - struct dma_async_tx_descriptor *desc = NULL; 867 - struct dma_chan *chan = host->chan_tx; 868 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 869 - dma_cookie_t cookie; 870 - int ret, i; 871 - bool aligned = true, multiple = true; 872 - unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 873 - 874 - for_each_sg(sg, sg_tmp, host->sg_len, i) { 875 - if (sg_tmp->offset & align) 876 - aligned = false; 877 - if (sg_tmp->length & align) { 878 - multiple = false; 879 - break; 880 - } 881 - } 882 - 883 - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 884 - align >= MAX_ALIGN)) || !multiple) { 885 - ret = -EINVAL; 886 - goto pio; 887 - } 888 - 889 - /* The only sg element can be unaligned, use our bounce buffer then */ 890 - if (!aligned) { 891 - unsigned long flags; 892 - void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); 893 - sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 894 - memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); 895 - tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); 896 - host->sg_ptr = &host->bounce_sg; 897 
- sg = host->sg_ptr; 898 - } 899 - 900 - ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 901 - if (ret > 0) 902 - desc = chan->device->device_prep_slave_sg(chan, sg, ret, 903 - DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 904 - 905 - if (desc) { 906 - desc->callback = tmio_dma_complete; 907 - desc->callback_param = host; 908 - cookie = dmaengine_submit(desc); 909 - } 910 - dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 911 - __func__, host->sg_len, ret, cookie, host->mrq); 912 - 913 - pio: 914 - if (!desc) { 915 - /* DMA failed, fall back to PIO */ 916 - if (ret >= 0) 917 - ret = -EIO; 918 - host->chan_tx = NULL; 919 - dma_release_channel(chan); 920 - /* Free the Rx channel too */ 921 - chan = host->chan_rx; 922 - if (chan) { 923 - host->chan_rx = NULL; 924 - dma_release_channel(chan); 925 - } 926 - dev_warn(&host->pdev->dev, 927 - "DMA failed: %d, falling back to PIO\n", ret); 928 - tmio_mmc_enable_dma(host, false); 929 - } 930 - 931 - dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, 932 - desc, cookie); 933 - } 934 - 935 - static void tmio_mmc_start_dma(struct tmio_mmc_host *host, 936 - struct mmc_data *data) 937 - { 938 - if (data->flags & MMC_DATA_READ) { 939 - if (host->chan_rx) 940 - tmio_mmc_start_dma_rx(host); 941 - } else { 942 - if (host->chan_tx) 943 - tmio_mmc_start_dma_tx(host); 944 - } 945 - } 946 - 947 - static void tmio_issue_tasklet_fn(unsigned long priv) 948 - { 949 - struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; 950 - struct dma_chan *chan = host->chan_tx; 951 - 952 - dma_async_issue_pending(chan); 953 - } 954 - 955 - static void tmio_tasklet_fn(unsigned long arg) 956 - { 957 - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; 958 - unsigned long flags; 959 - 960 - spin_lock_irqsave(&host->lock, flags); 961 - 962 - if (!host->data) 963 - goto out; 964 - 965 - if (host->data->flags & MMC_DATA_READ) 966 - dma_unmap_sg(host->chan_rx->device->dev, 967 - 
host->sg_ptr, host->sg_len, 968 - DMA_FROM_DEVICE); 969 - else 970 - dma_unmap_sg(host->chan_tx->device->dev, 971 - host->sg_ptr, host->sg_len, 972 - DMA_TO_DEVICE); 973 - 974 - tmio_mmc_do_data_irq(host); 975 - out: 976 - spin_unlock_irqrestore(&host->lock, flags); 977 - } 978 - 979 - /* It might be necessary to make filter MFD specific */ 980 - static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) 981 - { 982 - dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); 983 - chan->private = arg; 984 - return true; 985 - } 986 - 987 - static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 988 - struct tmio_mmc_data *pdata) 989 - { 990 - /* We can only either use DMA for both Tx and Rx or not use it at all */ 991 - if (pdata->dma) { 992 - dma_cap_mask_t mask; 993 - 994 - dma_cap_zero(mask); 995 - dma_cap_set(DMA_SLAVE, mask); 996 - 997 - host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, 998 - pdata->dma->chan_priv_tx); 999 - dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, 1000 - host->chan_tx); 1001 - 1002 - if (!host->chan_tx) 1003 - return; 1004 - 1005 - host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, 1006 - pdata->dma->chan_priv_rx); 1007 - dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, 1008 - host->chan_rx); 1009 - 1010 - if (!host->chan_rx) { 1011 - dma_release_channel(host->chan_tx); 1012 - host->chan_tx = NULL; 1013 - return; 1014 - } 1015 - 1016 - tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); 1017 - tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); 1018 - 1019 - tmio_mmc_enable_dma(host, true); 1020 - } 1021 - } 1022 - 1023 - static void tmio_mmc_release_dma(struct tmio_mmc_host *host) 1024 - { 1025 - if (host->chan_tx) { 1026 - struct dma_chan *chan = host->chan_tx; 1027 - host->chan_tx = NULL; 1028 - dma_release_channel(chan); 1029 - } 1030 - if (host->chan_rx) { 1031 - struct dma_chan *chan = host->chan_rx; 1032 - host->chan_rx = 
NULL; 1033 - dma_release_channel(chan); 1034 - } 1035 - } 1036 - #else 1037 - static void tmio_check_bounce_buffer(struct tmio_mmc_host *host) 1038 - { 1039 - } 1040 - 1041 - static void tmio_mmc_start_dma(struct tmio_mmc_host *host, 1042 - struct mmc_data *data) 1043 - { 1044 - } 1045 - 1046 - static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 1047 - struct tmio_mmc_data *pdata) 1048 - { 1049 - host->chan_tx = NULL; 1050 - host->chan_rx = NULL; 1051 - } 1052 - 1053 - static void tmio_mmc_release_dma(struct tmio_mmc_host *host) 1054 - { 1055 - } 1056 - #endif 1057 - 1058 - static int tmio_mmc_start_data(struct tmio_mmc_host *host, 1059 - struct mmc_data *data) 1060 - { 1061 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 1062 - 1063 - pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 1064 - data->blksz, data->blocks); 1065 - 1066 - /* Some hardware cannot perform 2 byte requests in 4 bit mode */ 1067 - if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { 1068 - int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; 1069 - 1070 - if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { 1071 - pr_err("%s: %d byte block unsupported in 4 bit mode\n", 1072 - mmc_hostname(host->mmc), data->blksz); 1073 - return -EINVAL; 1074 - } 1075 - } 1076 - 1077 - tmio_mmc_init_sg(host, data); 1078 - host->data = data; 1079 - 1080 - /* Set transfer length / blocksize */ 1081 - sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 1082 - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 1083 - 1084 - tmio_mmc_start_dma(host, data); 1085 - 1086 - return 0; 1087 - } 1088 - 1089 - /* Process requests from the MMC layer */ 1090 - static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 1091 - { 1092 - struct tmio_mmc_host *host = mmc_priv(mmc); 1093 - int ret; 1094 - 1095 - if (host->mrq) 1096 - pr_debug("request not null\n"); 1097 - 1098 - host->last_req_ts = jiffies; 1099 - wmb(); 1100 - host->mrq = mrq; 1101 - 1102 - if 
(mrq->data) { 1103 - ret = tmio_mmc_start_data(host, mrq->data); 1104 - if (ret) 1105 - goto fail; 1106 - } 1107 - 1108 - ret = tmio_mmc_start_command(host, mrq->cmd); 1109 - if (!ret) { 1110 - schedule_delayed_work(&host->delayed_reset_work, 1111 - msecs_to_jiffies(2000)); 1112 - return; 1113 - } 1114 - 1115 - fail: 1116 - host->mrq = NULL; 1117 - mrq->cmd->error = ret; 1118 - mmc_request_done(mmc, mrq); 1119 - } 1120 - 1121 - /* Set MMC clock / power. 1122 - * Note: This controller uses a simple divider scheme therefore it cannot 1123 - * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as 1124 - * MMC wont run that fast, it has to be clocked at 12MHz which is the next 1125 - * slowest setting. 1126 - */ 1127 - static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1128 - { 1129 - struct tmio_mmc_host *host = mmc_priv(mmc); 1130 - 1131 - if (ios->clock) 1132 - tmio_mmc_set_clock(host, ios->clock); 1133 - 1134 - /* Power sequence - OFF -> ON -> UP */ 1135 - switch (ios->power_mode) { 1136 - case MMC_POWER_OFF: /* power down SD bus */ 1137 - if (host->set_pwr) 1138 - host->set_pwr(host->pdev, 0); 1139 - tmio_mmc_clk_stop(host); 1140 - break; 1141 - case MMC_POWER_ON: /* power up SD bus */ 1142 - if (host->set_pwr) 1143 - host->set_pwr(host->pdev, 1); 1144 - break; 1145 - case MMC_POWER_UP: /* start bus clock */ 1146 - tmio_mmc_clk_start(host); 1147 - break; 1148 - } 1149 - 1150 - switch (ios->bus_width) { 1151 - case MMC_BUS_WIDTH_1: 1152 - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); 1153 - break; 1154 - case MMC_BUS_WIDTH_4: 1155 - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); 1156 - break; 1157 - } 1158 - 1159 - /* Let things settle. 
delay taken from winCE driver */ 1160 - udelay(140); 1161 - } 1162 - 1163 - static int tmio_mmc_get_ro(struct mmc_host *mmc) 1164 - { 1165 - struct tmio_mmc_host *host = mmc_priv(mmc); 1166 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 1167 - 1168 - return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || 1169 - (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1; 1170 - } 1171 - 1172 - static int tmio_mmc_get_cd(struct mmc_host *mmc) 1173 - { 1174 - struct tmio_mmc_host *host = mmc_priv(mmc); 1175 - struct tmio_mmc_data *pdata = mfd_get_data(host->pdev); 1176 - 1177 - if (!pdata->get_cd) 1178 - return -ENOSYS; 1179 - else 1180 - return pdata->get_cd(host->pdev); 1181 - } 1182 - 1183 - static const struct mmc_host_ops tmio_mmc_ops = { 1184 - .request = tmio_mmc_request, 1185 - .set_ios = tmio_mmc_set_ios, 1186 - .get_ro = tmio_mmc_get_ro, 1187 - .get_cd = tmio_mmc_get_cd, 1188 - .enable_sdio_irq = tmio_mmc_enable_sdio_irq, 1189 - }; 24 + #include "tmio_mmc.h" 1190 25 1191 26 #ifdef CONFIG_PM 1192 27 static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) ··· 62 1227 #define tmio_mmc_resume NULL 63 1228 #endif 64 1229 65 - static int __devinit tmio_mmc_probe(struct platform_device *dev) 1230 + static int __devinit tmio_mmc_probe(struct platform_device *pdev) 66 1231 { 67 - const struct mfd_cell *cell = mfd_get_cell(dev); 1232 + const struct mfd_cell *cell = mfd_get_cell(pdev); 68 1233 struct tmio_mmc_data *pdata; 69 - struct resource *res_ctl; 70 1234 struct tmio_mmc_host *host; 71 - struct mmc_host *mmc; 72 1235 int ret = -EINVAL; 73 - u32 irq_mask = TMIO_MASK_CMD; 74 1236 75 - if (dev->num_resources != 2) 1237 + if (pdev->num_resources != 2) 76 1238 goto out; 77 1239 78 - res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); 79 - if (!res_ctl) 80 - goto out; 81 - 82 - pdata = mfd_get_data(dev); 1240 + pdata = mfd_get_data(pdev); 83 1241 if (!pdata || !pdata->hclk) 84 1242 goto out; 85 1243 86 - ret = -ENOMEM; 87 - 88 
- mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); 89 - if (!mmc) 90 - goto out; 91 - 92 - host = mmc_priv(mmc); 93 - host->mmc = mmc; 94 - host->pdev = dev; 95 - platform_set_drvdata(dev, mmc); 96 - 97 - host->set_pwr = pdata->set_pwr; 98 - host->set_clk_div = pdata->set_clk_div; 99 - 100 - /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ 101 - host->bus_shift = resource_size(res_ctl) >> 10; 102 - 103 - host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); 104 - if (!host->ctl) 105 - goto host_free; 106 - 107 - mmc->ops = &tmio_mmc_ops; 108 - mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; 109 - mmc->f_max = pdata->hclk; 110 - mmc->f_min = mmc->f_max / 512; 111 - mmc->max_segs = 32; 112 - mmc->max_blk_size = 512; 113 - mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * 114 - mmc->max_segs; 115 - mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 116 - mmc->max_seg_size = mmc->max_req_size; 117 - if (pdata->ocr_mask) 118 - mmc->ocr_avail = pdata->ocr_mask; 119 - else 120 - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 121 - 122 1244 /* Tell the MFD core we are ready to be enabled */ 123 1245 if (cell->enable) { 124 - ret = cell->enable(dev); 1246 + ret = cell->enable(pdev); 125 1247 if (ret) 126 - goto unmap_ctl; 1248 + goto out; 127 1249 } 128 1250 129 - tmio_mmc_clk_stop(host); 130 - reset(host); 131 - 132 - ret = platform_get_irq(dev, 0); 133 - if (ret >= 0) 134 - host->irq = ret; 135 - else 136 - goto cell_disable; 137 - 138 - disable_mmc_irqs(host, TMIO_MASK_ALL); 139 - if (pdata->flags & TMIO_MMC_SDIO_IRQ) 140 - tmio_mmc_enable_sdio_irq(mmc, 0); 141 - 142 - ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | 143 - IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); 1251 + ret = tmio_mmc_host_probe(&host, pdev, pdata); 144 1252 if (ret) 145 1253 goto cell_disable; 146 1254 147 - spin_lock_init(&host->lock); 148 - 149 - /* Init delayed work for request timeouts */ 150 - 
INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work); 151 - 152 - /* See if we also get DMA */ 153 - tmio_mmc_request_dma(host, pdata); 154 - 155 - mmc_add_host(mmc); 156 - 157 1255 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 158 1256 (unsigned long)host->ctl, host->irq); 159 - 160 - /* Unmask the IRQs we want to know about */ 161 - if (!host->chan_rx) 162 - irq_mask |= TMIO_MASK_READOP; 163 - if (!host->chan_tx) 164 - irq_mask |= TMIO_MASK_WRITEOP; 165 - enable_mmc_irqs(host, irq_mask); 166 1257 167 1258 return 0; 168 1259 169 1260 cell_disable: 170 1261 if (cell->disable) 171 - cell->disable(dev); 172 - unmap_ctl: 173 - iounmap(host->ctl); 174 - host_free: 175 - mmc_free_host(mmc); 1262 + cell->disable(pdev); 176 1263 out: 177 1264 return ret; 178 1265 } 179 1266 180 - static int __devexit tmio_mmc_remove(struct platform_device *dev) 1267 + static int __devexit tmio_mmc_remove(struct platform_device *pdev) 181 1268 { 182 - const struct mfd_cell *cell = mfd_get_cell(dev); 183 - struct mmc_host *mmc = platform_get_drvdata(dev); 1269 + const struct mfd_cell *cell = mfd_get_cell(pdev); 1270 + struct mmc_host *mmc = platform_get_drvdata(pdev); 184 1271 185 - platform_set_drvdata(dev, NULL); 1272 + platform_set_drvdata(pdev, NULL); 186 1273 187 1274 if (mmc) { 188 - struct tmio_mmc_host *host = mmc_priv(mmc); 189 - mmc_remove_host(mmc); 190 - cancel_delayed_work_sync(&host->delayed_reset_work); 191 - tmio_mmc_release_dma(host); 192 - free_irq(host->irq, host); 1275 + tmio_mmc_host_remove(mmc_priv(mmc)); 193 1276 if (cell->disable) 194 - cell->disable(dev); 195 - iounmap(host->ctl); 196 - mmc_free_host(mmc); 1277 + cell->disable(pdev); 197 1278 } 198 1279 199 1280 return 0;
+123
drivers/mmc/host/tmio_mmc.h
··· 1 + /* 2 + * linux/drivers/mmc/host/tmio_mmc.h 3 + * 4 + * Copyright (C) 2007 Ian Molton 5 + * Copyright (C) 2004 Ian Molton 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * Driver for the MMC / SD / SDIO cell found in: 12 + * 13 + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 14 + */ 15 + 16 + #ifndef TMIO_MMC_H 17 + #define TMIO_MMC_H 18 + 19 + #include <linux/highmem.h> 20 + #include <linux/mmc/tmio.h> 21 + #include <linux/pagemap.h> 22 + 23 + /* Definitions for values the CTRL_SDIO_STATUS register can take. */ 24 + #define TMIO_SDIO_STAT_IOIRQ 0x0001 25 + #define TMIO_SDIO_STAT_EXPUB52 0x4000 26 + #define TMIO_SDIO_STAT_EXWT 0x8000 27 + #define TMIO_SDIO_MASK_ALL 0xc007 28 + 29 + /* Define some IRQ masks */ 30 + /* This is the mask used at reset by the chip */ 31 + #define TMIO_MASK_ALL 0x837f031d 32 + #define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND) 33 + #define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND) 34 + #define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \ 35 + TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 36 + #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 37 + 38 + struct tmio_mmc_data; 39 + 40 + struct tmio_mmc_host { 41 + void __iomem *ctl; 42 + unsigned long bus_shift; 43 + struct mmc_command *cmd; 44 + struct mmc_request *mrq; 45 + struct mmc_data *data; 46 + struct mmc_host *mmc; 47 + int irq; 48 + unsigned int sdio_irq_enabled; 49 + 50 + /* Callbacks for clock / power control */ 51 + void (*set_pwr)(struct platform_device *host, int state); 52 + void (*set_clk_div)(struct platform_device *host, int state); 53 + 54 + /* pio related stuff */ 55 + struct scatterlist *sg_ptr; 56 + struct scatterlist *sg_orig; 57 + unsigned int sg_len; 58 + unsigned int sg_off; 59 + 60 + struct platform_device *pdev; 61 + struct 
tmio_mmc_data *pdata; 62 + 63 + /* DMA support */ 64 + bool force_pio; 65 + struct dma_chan *chan_rx; 66 + struct dma_chan *chan_tx; 67 + struct tasklet_struct dma_complete; 68 + struct tasklet_struct dma_issue; 69 + struct scatterlist bounce_sg; 70 + u8 *bounce_buf; 71 + 72 + /* Track lost interrupts */ 73 + struct delayed_work delayed_reset_work; 74 + spinlock_t lock; 75 + unsigned long last_req_ts; 76 + }; 77 + 78 + int tmio_mmc_host_probe(struct tmio_mmc_host **host, 79 + struct platform_device *pdev, 80 + struct tmio_mmc_data *pdata); 81 + void tmio_mmc_host_remove(struct tmio_mmc_host *host); 82 + void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); 83 + 84 + void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 85 + void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 86 + 87 + static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, 88 + unsigned long *flags) 89 + { 90 + local_irq_save(*flags); 91 + return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 92 + } 93 + 94 + static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, 95 + unsigned long *flags, void *virt) 96 + { 97 + kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ); 98 + local_irq_restore(*flags); 99 + } 100 + 101 + #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) 102 + void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); 103 + void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); 104 + void tmio_mmc_release_dma(struct tmio_mmc_host *host); 105 + #else 106 + static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, 107 + struct mmc_data *data) 108 + { 109 + } 110 + 111 + static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, 112 + struct tmio_mmc_data *pdata) 113 + { 114 + host->chan_tx = NULL; 115 + host->chan_rx = NULL; 116 + } 117 + 118 + static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) 119 + { 120 + } 121 + #endif 122 + 123 + #endif
+317
drivers/mmc/host/tmio_mmc_dma.c
··· 1 + /* 2 + * linux/drivers/mmc/tmio_mmc_dma.c 3 + * 4 + * Copyright (C) 2010-2011 Guennadi Liakhovetski 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * DMA function for TMIO MMC implementations 11 + */ 12 + 13 + #include <linux/device.h> 14 + #include <linux/dmaengine.h> 15 + #include <linux/mfd/tmio.h> 16 + #include <linux/mmc/host.h> 17 + #include <linux/mmc/tmio.h> 18 + #include <linux/pagemap.h> 19 + #include <linux/scatterlist.h> 20 + 21 + #include "tmio_mmc.h" 22 + 23 + #define TMIO_MMC_MIN_DMA_LEN 8 24 + 25 + static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) 26 + { 27 + #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) 28 + /* Switch DMA mode on or off - SuperH specific? */ 29 + writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift)); 30 + #endif 31 + } 32 + 33 + static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) 34 + { 35 + struct scatterlist *sg = host->sg_ptr, *sg_tmp; 36 + struct dma_async_tx_descriptor *desc = NULL; 37 + struct dma_chan *chan = host->chan_rx; 38 + struct tmio_mmc_data *pdata = host->pdata; 39 + dma_cookie_t cookie; 40 + int ret, i; 41 + bool aligned = true, multiple = true; 42 + unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 43 + 44 + for_each_sg(sg, sg_tmp, host->sg_len, i) { 45 + if (sg_tmp->offset & align) 46 + aligned = false; 47 + if (sg_tmp->length & align) { 48 + multiple = false; 49 + break; 50 + } 51 + } 52 + 53 + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 54 + (align & PAGE_MASK))) || !multiple) { 55 + ret = -EINVAL; 56 + goto pio; 57 + } 58 + 59 + if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 60 + host->force_pio = true; 61 + return; 62 + } 63 + 64 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); 65 + 66 + /* The only sg element can be unaligned, use our bounce buffer 
then */ 67 + if (!aligned) { 68 + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 69 + host->sg_ptr = &host->bounce_sg; 70 + sg = host->sg_ptr; 71 + } 72 + 73 + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 74 + if (ret > 0) 75 + desc = chan->device->device_prep_slave_sg(chan, sg, ret, 76 + DMA_FROM_DEVICE, DMA_CTRL_ACK); 77 + 78 + if (desc) { 79 + cookie = dmaengine_submit(desc); 80 + if (cookie < 0) { 81 + desc = NULL; 82 + ret = cookie; 83 + } 84 + } 85 + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 86 + __func__, host->sg_len, ret, cookie, host->mrq); 87 + 88 + pio: 89 + if (!desc) { 90 + /* DMA failed, fall back to PIO */ 91 + if (ret >= 0) 92 + ret = -EIO; 93 + host->chan_rx = NULL; 94 + dma_release_channel(chan); 95 + /* Free the Tx channel too */ 96 + chan = host->chan_tx; 97 + if (chan) { 98 + host->chan_tx = NULL; 99 + dma_release_channel(chan); 100 + } 101 + dev_warn(&host->pdev->dev, 102 + "DMA failed: %d, falling back to PIO\n", ret); 103 + tmio_mmc_enable_dma(host, false); 104 + } 105 + 106 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, 107 + desc, cookie, host->sg_len); 108 + } 109 + 110 + static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) 111 + { 112 + struct scatterlist *sg = host->sg_ptr, *sg_tmp; 113 + struct dma_async_tx_descriptor *desc = NULL; 114 + struct dma_chan *chan = host->chan_tx; 115 + struct tmio_mmc_data *pdata = host->pdata; 116 + dma_cookie_t cookie; 117 + int ret, i; 118 + bool aligned = true, multiple = true; 119 + unsigned int align = (1 << pdata->dma->alignment_shift) - 1; 120 + 121 + for_each_sg(sg, sg_tmp, host->sg_len, i) { 122 + if (sg_tmp->offset & align) 123 + aligned = false; 124 + if (sg_tmp->length & align) { 125 + multiple = false; 126 + break; 127 + } 128 + } 129 + 130 + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 131 + (align & PAGE_MASK))) || !multiple) { 132 + ret = -EINVAL; 133 + goto 
pio; 134 + } 135 + 136 + if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 137 + host->force_pio = true; 138 + return; 139 + } 140 + 141 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); 142 + 143 + /* The only sg element can be unaligned, use our bounce buffer then */ 144 + if (!aligned) { 145 + unsigned long flags; 146 + void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); 147 + sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 148 + memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); 149 + tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); 150 + host->sg_ptr = &host->bounce_sg; 151 + sg = host->sg_ptr; 152 + } 153 + 154 + ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 155 + if (ret > 0) 156 + desc = chan->device->device_prep_slave_sg(chan, sg, ret, 157 + DMA_TO_DEVICE, DMA_CTRL_ACK); 158 + 159 + if (desc) { 160 + cookie = dmaengine_submit(desc); 161 + if (cookie < 0) { 162 + desc = NULL; 163 + ret = cookie; 164 + } 165 + } 166 + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 167 + __func__, host->sg_len, ret, cookie, host->mrq); 168 + 169 + pio: 170 + if (!desc) { 171 + /* DMA failed, fall back to PIO */ 172 + if (ret >= 0) 173 + ret = -EIO; 174 + host->chan_tx = NULL; 175 + dma_release_channel(chan); 176 + /* Free the Rx channel too */ 177 + chan = host->chan_rx; 178 + if (chan) { 179 + host->chan_rx = NULL; 180 + dma_release_channel(chan); 181 + } 182 + dev_warn(&host->pdev->dev, 183 + "DMA failed: %d, falling back to PIO\n", ret); 184 + tmio_mmc_enable_dma(host, false); 185 + } 186 + 187 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, 188 + desc, cookie); 189 + } 190 + 191 + void tmio_mmc_start_dma(struct tmio_mmc_host *host, 192 + struct mmc_data *data) 193 + { 194 + if (data->flags & MMC_DATA_READ) { 195 + if (host->chan_rx) 196 + tmio_mmc_start_dma_rx(host); 197 + } else { 198 + if (host->chan_tx) 199 + tmio_mmc_start_dma_tx(host); 200 + } 201 + } 202 + 203 + static void 
tmio_mmc_issue_tasklet_fn(unsigned long priv) 204 + { 205 + struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; 206 + struct dma_chan *chan = NULL; 207 + 208 + spin_lock_irq(&host->lock); 209 + 210 + if (host && host->data) { 211 + if (host->data->flags & MMC_DATA_READ) 212 + chan = host->chan_rx; 213 + else 214 + chan = host->chan_tx; 215 + } 216 + 217 + spin_unlock_irq(&host->lock); 218 + 219 + tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); 220 + 221 + if (chan) 222 + dma_async_issue_pending(chan); 223 + } 224 + 225 + static void tmio_mmc_tasklet_fn(unsigned long arg) 226 + { 227 + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; 228 + 229 + spin_lock_irq(&host->lock); 230 + 231 + if (!host->data) 232 + goto out; 233 + 234 + if (host->data->flags & MMC_DATA_READ) 235 + dma_unmap_sg(host->chan_rx->device->dev, 236 + host->sg_ptr, host->sg_len, 237 + DMA_FROM_DEVICE); 238 + else 239 + dma_unmap_sg(host->chan_tx->device->dev, 240 + host->sg_ptr, host->sg_len, 241 + DMA_TO_DEVICE); 242 + 243 + tmio_mmc_do_data_irq(host); 244 + out: 245 + spin_unlock_irq(&host->lock); 246 + } 247 + 248 + /* It might be necessary to make filter MFD specific */ 249 + static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) 250 + { 251 + dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); 252 + chan->private = arg; 253 + return true; 254 + } 255 + 256 + void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) 257 + { 258 + /* We can only either use DMA for both Tx and Rx or not use it at all */ 259 + if (pdata->dma) { 260 + dma_cap_mask_t mask; 261 + 262 + dma_cap_zero(mask); 263 + dma_cap_set(DMA_SLAVE, mask); 264 + 265 + host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, 266 + pdata->dma->chan_priv_tx); 267 + dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, 268 + host->chan_tx); 269 + 270 + if (!host->chan_tx) 271 + return; 272 + 273 + host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, 274 + 
pdata->dma->chan_priv_rx); 275 + dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, 276 + host->chan_rx); 277 + 278 + if (!host->chan_rx) 279 + goto ereqrx; 280 + 281 + host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); 282 + if (!host->bounce_buf) 283 + goto ebouncebuf; 284 + 285 + tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); 286 + tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); 287 + 288 + tmio_mmc_enable_dma(host, true); 289 + 290 + return; 291 + ebouncebuf: 292 + dma_release_channel(host->chan_rx); 293 + host->chan_rx = NULL; 294 + ereqrx: 295 + dma_release_channel(host->chan_tx); 296 + host->chan_tx = NULL; 297 + return; 298 + } 299 + } 300 + 301 + void tmio_mmc_release_dma(struct tmio_mmc_host *host) 302 + { 303 + if (host->chan_tx) { 304 + struct dma_chan *chan = host->chan_tx; 305 + host->chan_tx = NULL; 306 + dma_release_channel(chan); 307 + } 308 + if (host->chan_rx) { 309 + struct dma_chan *chan = host->chan_rx; 310 + host->chan_rx = NULL; 311 + dma_release_channel(chan); 312 + } 313 + if (host->bounce_buf) { 314 + free_pages((unsigned long)host->bounce_buf, 0); 315 + host->bounce_buf = NULL; 316 + } 317 + }
+897
drivers/mmc/host/tmio_mmc_pio.c
··· 1 + /* 2 + * linux/drivers/mmc/host/tmio_mmc_pio.c 3 + * 4 + * Copyright (C) 2011 Guennadi Liakhovetski 5 + * Copyright (C) 2007 Ian Molton 6 + * Copyright (C) 2004 Ian Molton 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * Driver for the MMC / SD / SDIO IP found in: 13 + * 14 + * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs 15 + * 16 + * This driver draws mainly on scattered spec sheets, Reverse engineering 17 + * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit 18 + * support). (Further 4 bit support from a later datasheet). 19 + * 20 + * TODO: 21 + * Investigate using a workqueue for PIO transfers 22 + * Eliminate FIXMEs 23 + * SDIO support 24 + * Better Power management 25 + * Handle MMC errors better 26 + * double buffer support 27 + * 28 + */ 29 + 30 + #include <linux/delay.h> 31 + #include <linux/device.h> 32 + #include <linux/highmem.h> 33 + #include <linux/interrupt.h> 34 + #include <linux/io.h> 35 + #include <linux/irq.h> 36 + #include <linux/mfd/tmio.h> 37 + #include <linux/mmc/host.h> 38 + #include <linux/mmc/tmio.h> 39 + #include <linux/module.h> 40 + #include <linux/pagemap.h> 41 + #include <linux/platform_device.h> 42 + #include <linux/scatterlist.h> 43 + #include <linux/workqueue.h> 44 + #include <linux/spinlock.h> 45 + 46 + #include "tmio_mmc.h" 47 + 48 + static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) 49 + { 50 + return readw(host->ctl + (addr << host->bus_shift)); 51 + } 52 + 53 + static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, 54 + u16 *buf, int count) 55 + { 56 + readsw(host->ctl + (addr << host->bus_shift), buf, count); 57 + } 58 + 59 + static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) 60 + { 61 + return readw(host->ctl + (addr << host->bus_shift)) | 62 + readw(host->ctl + ((addr + 
2) << host->bus_shift)) << 16; 63 + } 64 + 65 + static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) 66 + { 67 + writew(val, host->ctl + (addr << host->bus_shift)); 68 + } 69 + 70 + static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, 71 + u16 *buf, int count) 72 + { 73 + writesw(host->ctl + (addr << host->bus_shift), buf, count); 74 + } 75 + 76 + static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) 77 + { 78 + writew(val, host->ctl + (addr << host->bus_shift)); 79 + writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); 80 + } 81 + 82 + void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 83 + { 84 + u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ); 85 + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 86 + } 87 + 88 + void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) 89 + { 90 + u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ); 91 + sd_ctrl_write32(host, CTL_IRQ_MASK, mask); 92 + } 93 + 94 + static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) 95 + { 96 + sd_ctrl_write32(host, CTL_STATUS, ~i); 97 + } 98 + 99 + static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) 100 + { 101 + host->sg_len = data->sg_len; 102 + host->sg_ptr = data->sg; 103 + host->sg_orig = data->sg; 104 + host->sg_off = 0; 105 + } 106 + 107 + static int tmio_mmc_next_sg(struct tmio_mmc_host *host) 108 + { 109 + host->sg_ptr = sg_next(host->sg_ptr); 110 + host->sg_off = 0; 111 + return --host->sg_len; 112 + } 113 + 114 + #ifdef CONFIG_MMC_DEBUG 115 + 116 + #define STATUS_TO_TEXT(a, status, i) \ 117 + do { \ 118 + if (status & TMIO_STAT_##a) { \ 119 + if (i++) \ 120 + printk(" | "); \ 121 + printk(#a); \ 122 + } \ 123 + } while (0) 124 + 125 + static void pr_debug_status(u32 status) 126 + { 127 + int i = 0; 128 + printk(KERN_DEBUG "status: %08x = ", status); 129 + STATUS_TO_TEXT(CARD_REMOVE, status, i); 130 + 
STATUS_TO_TEXT(CARD_INSERT, status, i); 131 + STATUS_TO_TEXT(SIGSTATE, status, i); 132 + STATUS_TO_TEXT(WRPROTECT, status, i); 133 + STATUS_TO_TEXT(CARD_REMOVE_A, status, i); 134 + STATUS_TO_TEXT(CARD_INSERT_A, status, i); 135 + STATUS_TO_TEXT(SIGSTATE_A, status, i); 136 + STATUS_TO_TEXT(CMD_IDX_ERR, status, i); 137 + STATUS_TO_TEXT(STOPBIT_ERR, status, i); 138 + STATUS_TO_TEXT(ILL_FUNC, status, i); 139 + STATUS_TO_TEXT(CMD_BUSY, status, i); 140 + STATUS_TO_TEXT(CMDRESPEND, status, i); 141 + STATUS_TO_TEXT(DATAEND, status, i); 142 + STATUS_TO_TEXT(CRCFAIL, status, i); 143 + STATUS_TO_TEXT(DATATIMEOUT, status, i); 144 + STATUS_TO_TEXT(CMDTIMEOUT, status, i); 145 + STATUS_TO_TEXT(RXOVERFLOW, status, i); 146 + STATUS_TO_TEXT(TXUNDERRUN, status, i); 147 + STATUS_TO_TEXT(RXRDY, status, i); 148 + STATUS_TO_TEXT(TXRQ, status, i); 149 + STATUS_TO_TEXT(ILL_ACCESS, status, i); 150 + printk("\n"); 151 + } 152 + 153 + #else 154 + #define pr_debug_status(s) do { } while (0) 155 + #endif 156 + 157 + static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 158 + { 159 + struct tmio_mmc_host *host = mmc_priv(mmc); 160 + 161 + if (enable) { 162 + host->sdio_irq_enabled = 1; 163 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); 164 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, 165 + (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ)); 166 + } else { 167 + sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL); 168 + sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 169 + host->sdio_irq_enabled = 0; 170 + } 171 + } 172 + 173 + static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 174 + { 175 + u32 clk = 0, clock; 176 + 177 + if (new_clock) { 178 + for (clock = host->mmc->f_min, clk = 0x80000080; 179 + new_clock >= (clock<<1); clk >>= 1) 180 + clock <<= 1; 181 + clk |= 0x100; 182 + } 183 + 184 + if (host->set_clk_div) 185 + host->set_clk_div(host->pdev, (clk>>22) & 1); 186 + 187 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); 188 + } 189 
+ 190 + static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 191 + { 192 + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); 193 + 194 + /* implicit BUG_ON(!res) */ 195 + if (resource_size(res) > 0x100) { 196 + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); 197 + msleep(10); 198 + } 199 + 200 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & 201 + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 202 + msleep(10); 203 + } 204 + 205 + static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 206 + { 207 + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); 208 + 209 + sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | 210 + sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 211 + msleep(10); 212 + 213 + /* implicit BUG_ON(!res) */ 214 + if (resource_size(res) > 0x100) { 215 + sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); 216 + msleep(10); 217 + } 218 + } 219 + 220 + static void tmio_mmc_reset(struct tmio_mmc_host *host) 221 + { 222 + struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); 223 + 224 + /* FIXME - should we set stop clock reg here */ 225 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); 226 + /* implicit BUG_ON(!res) */ 227 + if (resource_size(res) > 0x100) 228 + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); 229 + msleep(10); 230 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); 231 + if (resource_size(res) > 0x100) 232 + sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); 233 + msleep(10); 234 + } 235 + 236 + static void tmio_mmc_reset_work(struct work_struct *work) 237 + { 238 + struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, 239 + delayed_reset_work.work); 240 + struct mmc_request *mrq; 241 + unsigned long flags; 242 + 243 + spin_lock_irqsave(&host->lock, flags); 244 + mrq = host->mrq; 245 + 246 + /* request already finished */ 247 + if (!mrq 248 + || time_is_after_jiffies(host->last_req_ts + 249 + msecs_to_jiffies(2000))) { 250 + 
spin_unlock_irqrestore(&host->lock, flags); 251 + return; 252 + } 253 + 254 + dev_warn(&host->pdev->dev, 255 + "timeout waiting for hardware interrupt (CMD%u)\n", 256 + mrq->cmd->opcode); 257 + 258 + if (host->data) 259 + host->data->error = -ETIMEDOUT; 260 + else if (host->cmd) 261 + host->cmd->error = -ETIMEDOUT; 262 + else 263 + mrq->cmd->error = -ETIMEDOUT; 264 + 265 + host->cmd = NULL; 266 + host->data = NULL; 267 + host->mrq = NULL; 268 + host->force_pio = false; 269 + 270 + spin_unlock_irqrestore(&host->lock, flags); 271 + 272 + tmio_mmc_reset(host); 273 + 274 + mmc_request_done(host->mmc, mrq); 275 + } 276 + 277 + static void tmio_mmc_finish_request(struct tmio_mmc_host *host) 278 + { 279 + struct mmc_request *mrq = host->mrq; 280 + 281 + if (!mrq) 282 + return; 283 + 284 + host->mrq = NULL; 285 + host->cmd = NULL; 286 + host->data = NULL; 287 + host->force_pio = false; 288 + 289 + cancel_delayed_work(&host->delayed_reset_work); 290 + 291 + mmc_request_done(host->mmc, mrq); 292 + } 293 + 294 + /* These are the bitmasks the tmio chip requires to implement the MMC response 295 + * types. Note that R1 and R6 are the same in this scheme. 
*/ 296 + #define APP_CMD 0x0040 297 + #define RESP_NONE 0x0300 298 + #define RESP_R1 0x0400 299 + #define RESP_R1B 0x0500 300 + #define RESP_R2 0x0600 301 + #define RESP_R3 0x0700 302 + #define DATA_PRESENT 0x0800 303 + #define TRANSFER_READ 0x1000 304 + #define TRANSFER_MULTI 0x2000 305 + #define SECURITY_CMD 0x4000 306 + 307 + static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) 308 + { 309 + struct mmc_data *data = host->data; 310 + int c = cmd->opcode; 311 + 312 + /* Command 12 is handled by hardware */ 313 + if (cmd->opcode == 12 && !cmd->arg) { 314 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); 315 + return 0; 316 + } 317 + 318 + switch (mmc_resp_type(cmd)) { 319 + case MMC_RSP_NONE: c |= RESP_NONE; break; 320 + case MMC_RSP_R1: c |= RESP_R1; break; 321 + case MMC_RSP_R1B: c |= RESP_R1B; break; 322 + case MMC_RSP_R2: c |= RESP_R2; break; 323 + case MMC_RSP_R3: c |= RESP_R3; break; 324 + default: 325 + pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); 326 + return -EINVAL; 327 + } 328 + 329 + host->cmd = cmd; 330 + 331 + /* FIXME - this seems to be ok commented out but the spec suggest this bit 332 + * should be set when issuing app commands. 333 + * if(cmd->flags & MMC_FLAG_ACMD) 334 + * c |= APP_CMD; 335 + */ 336 + if (data) { 337 + c |= DATA_PRESENT; 338 + if (data->blocks > 1) { 339 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); 340 + c |= TRANSFER_MULTI; 341 + } 342 + if (data->flags & MMC_DATA_READ) 343 + c |= TRANSFER_READ; 344 + } 345 + 346 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); 347 + 348 + /* Fire off the command */ 349 + sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); 350 + sd_ctrl_write16(host, CTL_SD_CMD, c); 351 + 352 + return 0; 353 + } 354 + 355 + /* 356 + * This chip always returns (at least?) as much data as you ask for. 357 + * I'm unsure what happens if you ask for less than a block. 
This should be 358 + * looked into to ensure that a funny length read doesnt hose the controller. 359 + */ 360 + static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 361 + { 362 + struct mmc_data *data = host->data; 363 + void *sg_virt; 364 + unsigned short *buf; 365 + unsigned int count; 366 + unsigned long flags; 367 + 368 + if ((host->chan_tx || host->chan_rx) && !host->force_pio) { 369 + pr_err("PIO IRQ in DMA mode!\n"); 370 + return; 371 + } else if (!data) { 372 + pr_debug("Spurious PIO IRQ\n"); 373 + return; 374 + } 375 + 376 + sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); 377 + buf = (unsigned short *)(sg_virt + host->sg_off); 378 + 379 + count = host->sg_ptr->length - host->sg_off; 380 + if (count > data->blksz) 381 + count = data->blksz; 382 + 383 + pr_debug("count: %08x offset: %08x flags %08x\n", 384 + count, host->sg_off, data->flags); 385 + 386 + /* Transfer the data */ 387 + if (data->flags & MMC_DATA_READ) 388 + sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 389 + else 390 + sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); 391 + 392 + host->sg_off += count; 393 + 394 + tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); 395 + 396 + if (host->sg_off == host->sg_ptr->length) 397 + tmio_mmc_next_sg(host); 398 + 399 + return; 400 + } 401 + 402 + static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) 403 + { 404 + if (host->sg_ptr == &host->bounce_sg) { 405 + unsigned long flags; 406 + void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); 407 + memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); 408 + tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); 409 + } 410 + } 411 + 412 + /* needs to be called with host->lock held */ 413 + void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) 414 + { 415 + struct mmc_data *data = host->data; 416 + struct mmc_command *stop; 417 + 418 + host->data = NULL; 419 + 420 + if (!data) { 421 + dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); 
422 + return; 423 + } 424 + stop = data->stop; 425 + 426 + /* FIXME - return correct transfer count on errors */ 427 + if (!data->error) 428 + data->bytes_xfered = data->blocks * data->blksz; 429 + else 430 + data->bytes_xfered = 0; 431 + 432 + pr_debug("Completed data request\n"); 433 + 434 + /* 435 + * FIXME: other drivers allow an optional stop command of any given type 436 + * which we dont do, as the chip can auto generate them. 437 + * Perhaps we can be smarter about when to use auto CMD12 and 438 + * only issue the auto request when we know this is the desired 439 + * stop command, allowing fallback to the stop command the 440 + * upper layers expect. For now, we do what works. 441 + */ 442 + 443 + if (data->flags & MMC_DATA_READ) { 444 + if (host->chan_rx && !host->force_pio) 445 + tmio_mmc_check_bounce_buffer(host); 446 + dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", 447 + host->mrq); 448 + } else { 449 + dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", 450 + host->mrq); 451 + } 452 + 453 + if (stop) { 454 + if (stop->opcode == 12 && !stop->arg) 455 + sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); 456 + else 457 + BUG(); 458 + } 459 + 460 + tmio_mmc_finish_request(host); 461 + } 462 + 463 + static void tmio_mmc_data_irq(struct tmio_mmc_host *host) 464 + { 465 + struct mmc_data *data; 466 + spin_lock(&host->lock); 467 + data = host->data; 468 + 469 + if (!data) 470 + goto out; 471 + 472 + if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { 473 + /* 474 + * Has all data been written out yet? Testing on SuperH showed, 475 + * that in most cases the first interrupt comes already with the 476 + * BUSY status bit clear, but on some operations, like mount or 477 + * in the beginning of a write / sync / umount, there is one 478 + * DATAEND interrupt with the BUSY bit set, in this cases 479 + * waiting for one more interrupt fixes the problem. 
480 + */ 481 + if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { 482 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); 483 + tasklet_schedule(&host->dma_complete); 484 + } 485 + } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { 486 + tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); 487 + tasklet_schedule(&host->dma_complete); 488 + } else { 489 + tmio_mmc_do_data_irq(host); 490 + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); 491 + } 492 + out: 493 + spin_unlock(&host->lock); 494 + } 495 + 496 + static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 497 + unsigned int stat) 498 + { 499 + struct mmc_command *cmd = host->cmd; 500 + int i, addr; 501 + 502 + spin_lock(&host->lock); 503 + 504 + if (!host->cmd) { 505 + pr_debug("Spurious CMD irq\n"); 506 + goto out; 507 + } 508 + 509 + host->cmd = NULL; 510 + 511 + /* This controller is sicker than the PXA one. Not only do we need to 512 + * drop the top 8 bits of the first response word, we also need to 513 + * modify the order of the response for short response command types. 514 + */ 515 + 516 + for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) 517 + cmd->resp[i] = sd_ctrl_read32(host, addr); 518 + 519 + if (cmd->flags & MMC_RSP_136) { 520 + cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); 521 + cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); 522 + cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); 523 + cmd->resp[3] <<= 8; 524 + } else if (cmd->flags & MMC_RSP_R3) { 525 + cmd->resp[0] = cmd->resp[3]; 526 + } 527 + 528 + if (stat & TMIO_STAT_CMDTIMEOUT) 529 + cmd->error = -ETIMEDOUT; 530 + else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) 531 + cmd->error = -EILSEQ; 532 + 533 + /* If there is data to handle we enable data IRQs here, and 534 + * we will ultimatley finish the request in the data_end handler. 535 + * If theres no data or we encountered an error, finish now. 
536 + */ 537 + if (host->data && !cmd->error) { 538 + if (host->data->flags & MMC_DATA_READ) { 539 + if (host->force_pio || !host->chan_rx) 540 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); 541 + else 542 + tasklet_schedule(&host->dma_issue); 543 + } else { 544 + if (host->force_pio || !host->chan_tx) 545 + tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 546 + else 547 + tasklet_schedule(&host->dma_issue); 548 + } 549 + } else { 550 + tmio_mmc_finish_request(host); 551 + } 552 + 553 + out: 554 + spin_unlock(&host->lock); 555 + } 556 + 557 + static irqreturn_t tmio_mmc_irq(int irq, void *devid) 558 + { 559 + struct tmio_mmc_host *host = devid; 560 + struct tmio_mmc_data *pdata = host->pdata; 561 + unsigned int ireg, irq_mask, status; 562 + unsigned int sdio_ireg, sdio_irq_mask, sdio_status; 563 + 564 + pr_debug("MMC IRQ begin\n"); 565 + 566 + status = sd_ctrl_read32(host, CTL_STATUS); 567 + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 568 + ireg = status & TMIO_MASK_IRQ & ~irq_mask; 569 + 570 + sdio_ireg = 0; 571 + if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) { 572 + sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS); 573 + sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK); 574 + sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask; 575 + 576 + sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL); 577 + 578 + if (sdio_ireg && !host->sdio_irq_enabled) { 579 + pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 
0x%04x 0x%04x 0x%04x\n", 580 + sdio_status, sdio_irq_mask, sdio_ireg); 581 + tmio_mmc_enable_sdio_irq(host->mmc, 0); 582 + goto out; 583 + } 584 + 585 + if (host->mmc->caps & MMC_CAP_SDIO_IRQ && 586 + sdio_ireg & TMIO_SDIO_STAT_IOIRQ) 587 + mmc_signal_sdio_irq(host->mmc); 588 + 589 + if (sdio_ireg) 590 + goto out; 591 + } 592 + 593 + pr_debug_status(status); 594 + pr_debug_status(ireg); 595 + 596 + if (!ireg) { 597 + tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask); 598 + 599 + pr_warning("tmio_mmc: Spurious irq, disabling! " 600 + "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 601 + pr_debug_status(status); 602 + 603 + goto out; 604 + } 605 + 606 + while (ireg) { 607 + /* Card insert / remove attempts */ 608 + if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { 609 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | 610 + TMIO_STAT_CARD_REMOVE); 611 + mmc_detect_change(host->mmc, msecs_to_jiffies(100)); 612 + } 613 + 614 + /* CRC and other errors */ 615 + /* if (ireg & TMIO_STAT_ERR_IRQ) 616 + * handled |= tmio_error_irq(host, irq, stat); 617 + */ 618 + 619 + /* Command completion */ 620 + if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { 621 + tmio_mmc_ack_mmc_irqs(host, 622 + TMIO_STAT_CMDRESPEND | 623 + TMIO_STAT_CMDTIMEOUT); 624 + tmio_mmc_cmd_irq(host, status); 625 + } 626 + 627 + /* Data transfer */ 628 + if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { 629 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); 630 + tmio_mmc_pio_irq(host); 631 + } 632 + 633 + /* Data transfer completion */ 634 + if (ireg & TMIO_STAT_DATAEND) { 635 + tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); 636 + tmio_mmc_data_irq(host); 637 + } 638 + 639 + /* Check status - keep going until we've handled it all */ 640 + status = sd_ctrl_read32(host, CTL_STATUS); 641 + irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 642 + ireg = status & TMIO_MASK_IRQ & ~irq_mask; 643 + 644 + pr_debug("Status at end of loop: %08x\n", status); 645 + 
pr_debug_status(status); 646 + } 647 + pr_debug("MMC IRQ end\n"); 648 + 649 + out: 650 + return IRQ_HANDLED; 651 + } 652 + 653 + static int tmio_mmc_start_data(struct tmio_mmc_host *host, 654 + struct mmc_data *data) 655 + { 656 + struct tmio_mmc_data *pdata = host->pdata; 657 + 658 + pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 659 + data->blksz, data->blocks); 660 + 661 + /* Some hardware cannot perform 2 byte requests in 4 bit mode */ 662 + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { 663 + int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; 664 + 665 + if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { 666 + pr_err("%s: %d byte block unsupported in 4 bit mode\n", 667 + mmc_hostname(host->mmc), data->blksz); 668 + return -EINVAL; 669 + } 670 + } 671 + 672 + tmio_mmc_init_sg(host, data); 673 + host->data = data; 674 + 675 + /* Set transfer length / blocksize */ 676 + sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 677 + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 678 + 679 + tmio_mmc_start_dma(host, data); 680 + 681 + return 0; 682 + } 683 + 684 + /* Process requests from the MMC layer */ 685 + static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 686 + { 687 + struct tmio_mmc_host *host = mmc_priv(mmc); 688 + int ret; 689 + 690 + if (host->mrq) 691 + pr_debug("request not null\n"); 692 + 693 + host->last_req_ts = jiffies; 694 + wmb(); 695 + host->mrq = mrq; 696 + 697 + if (mrq->data) { 698 + ret = tmio_mmc_start_data(host, mrq->data); 699 + if (ret) 700 + goto fail; 701 + } 702 + 703 + ret = tmio_mmc_start_command(host, mrq->cmd); 704 + if (!ret) { 705 + schedule_delayed_work(&host->delayed_reset_work, 706 + msecs_to_jiffies(2000)); 707 + return; 708 + } 709 + 710 + fail: 711 + host->mrq = NULL; 712 + host->force_pio = false; 713 + mrq->cmd->error = ret; 714 + mmc_request_done(mmc, mrq); 715 + } 716 + 717 + /* Set MMC clock / power. 
718 + * Note: This controller uses a simple divider scheme therefore it cannot 719 + * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as 720 + * MMC wont run that fast, it has to be clocked at 12MHz which is the next 721 + * slowest setting. 722 + */ 723 + static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 724 + { 725 + struct tmio_mmc_host *host = mmc_priv(mmc); 726 + 727 + if (ios->clock) 728 + tmio_mmc_set_clock(host, ios->clock); 729 + 730 + /* Power sequence - OFF -> UP -> ON */ 731 + if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 732 + /* power down SD bus */ 733 + if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) 734 + host->set_pwr(host->pdev, 0); 735 + tmio_mmc_clk_stop(host); 736 + } else if (ios->power_mode == MMC_POWER_UP) { 737 + /* power up SD bus */ 738 + if (host->set_pwr) 739 + host->set_pwr(host->pdev, 1); 740 + } else { 741 + /* start bus clock */ 742 + tmio_mmc_clk_start(host); 743 + } 744 + 745 + switch (ios->bus_width) { 746 + case MMC_BUS_WIDTH_1: 747 + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); 748 + break; 749 + case MMC_BUS_WIDTH_4: 750 + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); 751 + break; 752 + } 753 + 754 + /* Let things settle. 
delay taken from winCE driver */ 755 + udelay(140); 756 + } 757 + 758 + static int tmio_mmc_get_ro(struct mmc_host *mmc) 759 + { 760 + struct tmio_mmc_host *host = mmc_priv(mmc); 761 + struct tmio_mmc_data *pdata = host->pdata; 762 + 763 + return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || 764 + !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); 765 + } 766 + 767 + static int tmio_mmc_get_cd(struct mmc_host *mmc) 768 + { 769 + struct tmio_mmc_host *host = mmc_priv(mmc); 770 + struct tmio_mmc_data *pdata = host->pdata; 771 + 772 + if (!pdata->get_cd) 773 + return -ENOSYS; 774 + else 775 + return pdata->get_cd(host->pdev); 776 + } 777 + 778 + static const struct mmc_host_ops tmio_mmc_ops = { 779 + .request = tmio_mmc_request, 780 + .set_ios = tmio_mmc_set_ios, 781 + .get_ro = tmio_mmc_get_ro, 782 + .get_cd = tmio_mmc_get_cd, 783 + .enable_sdio_irq = tmio_mmc_enable_sdio_irq, 784 + }; 785 + 786 + int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, 787 + struct platform_device *pdev, 788 + struct tmio_mmc_data *pdata) 789 + { 790 + struct tmio_mmc_host *_host; 791 + struct mmc_host *mmc; 792 + struct resource *res_ctl; 793 + int ret; 794 + u32 irq_mask = TMIO_MASK_CMD; 795 + 796 + res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); 797 + if (!res_ctl) 798 + return -EINVAL; 799 + 800 + mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); 801 + if (!mmc) 802 + return -ENOMEM; 803 + 804 + _host = mmc_priv(mmc); 805 + _host->pdata = pdata; 806 + _host->mmc = mmc; 807 + _host->pdev = pdev; 808 + platform_set_drvdata(pdev, mmc); 809 + 810 + _host->set_pwr = pdata->set_pwr; 811 + _host->set_clk_div = pdata->set_clk_div; 812 + 813 + /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ 814 + _host->bus_shift = resource_size(res_ctl) >> 10; 815 + 816 + _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); 817 + if (!_host->ctl) { 818 + ret = -ENOMEM; 819 + goto host_free; 820 + } 821 + 822 + mmc->ops = 
&tmio_mmc_ops; 823 + mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; 824 + mmc->f_max = pdata->hclk; 825 + mmc->f_min = mmc->f_max / 512; 826 + mmc->max_segs = 32; 827 + mmc->max_blk_size = 512; 828 + mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * 829 + mmc->max_segs; 830 + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 831 + mmc->max_seg_size = mmc->max_req_size; 832 + if (pdata->ocr_mask) 833 + mmc->ocr_avail = pdata->ocr_mask; 834 + else 835 + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 836 + 837 + tmio_mmc_clk_stop(_host); 838 + tmio_mmc_reset(_host); 839 + 840 + ret = platform_get_irq(pdev, 0); 841 + if (ret < 0) 842 + goto unmap_ctl; 843 + 844 + _host->irq = ret; 845 + 846 + tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); 847 + if (pdata->flags & TMIO_MMC_SDIO_IRQ) 848 + tmio_mmc_enable_sdio_irq(mmc, 0); 849 + 850 + ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | 851 + IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); 852 + if (ret) 853 + goto unmap_ctl; 854 + 855 + spin_lock_init(&_host->lock); 856 + 857 + /* Init delayed work for request timeouts */ 858 + INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); 859 + 860 + /* See if we also get DMA */ 861 + tmio_mmc_request_dma(_host, pdata); 862 + 863 + mmc_add_host(mmc); 864 + 865 + /* Unmask the IRQs we want to know about */ 866 + if (!_host->chan_rx) 867 + irq_mask |= TMIO_MASK_READOP; 868 + if (!_host->chan_tx) 869 + irq_mask |= TMIO_MASK_WRITEOP; 870 + 871 + tmio_mmc_enable_mmc_irqs(_host, irq_mask); 872 + 873 + *host = _host; 874 + 875 + return 0; 876 + 877 + unmap_ctl: 878 + iounmap(_host->ctl); 879 + host_free: 880 + mmc_free_host(mmc); 881 + 882 + return ret; 883 + } 884 + EXPORT_SYMBOL(tmio_mmc_host_probe); 885 + 886 + void tmio_mmc_host_remove(struct tmio_mmc_host *host) 887 + { 888 + mmc_remove_host(host->mmc); 889 + cancel_delayed_work_sync(&host->delayed_reset_work); 890 + tmio_mmc_release_dma(host); 891 + free_irq(host->irq, 
host); 892 + iounmap(host->ctl); 893 + mmc_free_host(host->mmc); 894 + } 895 + EXPORT_SYMBOL(tmio_mmc_host_remove); 896 + 897 + MODULE_LICENSE("GPL v2");
+2 -3
drivers/mmc/host/via-sdmmc.c
··· 1087 1087 struct mmc_host *mmc; 1088 1088 struct via_crdr_mmc_host *sdhost; 1089 1089 u32 base, len; 1090 - u8 rev, gatt; 1090 + u8 gatt; 1091 1091 int ret; 1092 1092 1093 - pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev); 1094 1093 pr_info(DRV_NAME 1095 1094 ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", 1096 1095 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, 1097 - (int)rev); 1096 + (int)pcidev->revision); 1098 1097 1099 1098 ret = pci_enable_device(pcidev); 1100 1099 if (ret)
include/linux/mfd/sh_mobile_sdhi.h include/linux/mmc/sh_mobile_sdhi.h
+63
include/linux/mmc/tmio.h
··· 1 + /* 2 + * include/linux/mmc/tmio.h 3 + * 4 + * Copyright (C) 2007 Ian Molton 5 + * Copyright (C) 2004 Ian Molton 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * Driver for the MMC / SD / SDIO cell found in: 12 + * 13 + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 14 + */ 15 + #ifndef _LINUX_MMC_TMIO_H_ 16 + #define _LINUX_MMC_TMIO_H_ 17 + 18 + #define CTL_SD_CMD 0x00 19 + #define CTL_ARG_REG 0x04 20 + #define CTL_STOP_INTERNAL_ACTION 0x08 21 + #define CTL_XFER_BLK_COUNT 0xa 22 + #define CTL_RESPONSE 0x0c 23 + #define CTL_STATUS 0x1c 24 + #define CTL_IRQ_MASK 0x20 25 + #define CTL_SD_CARD_CLK_CTL 0x24 26 + #define CTL_SD_XFER_LEN 0x26 27 + #define CTL_SD_MEM_CARD_OPT 0x28 28 + #define CTL_SD_ERROR_DETAIL_STATUS 0x2c 29 + #define CTL_SD_DATA_PORT 0x30 30 + #define CTL_TRANSACTION_CTL 0x34 31 + #define CTL_SDIO_STATUS 0x36 32 + #define CTL_SDIO_IRQ_MASK 0x38 33 + #define CTL_RESET_SD 0xe0 34 + #define CTL_SDIO_REGS 0x100 35 + #define CTL_CLK_AND_WAIT_CTL 0x138 36 + #define CTL_RESET_SDIO 0x1e0 37 + 38 + /* Definitions for values the CTRL_STATUS register can take. 
*/ 39 + #define TMIO_STAT_CMDRESPEND 0x00000001 40 + #define TMIO_STAT_DATAEND 0x00000004 41 + #define TMIO_STAT_CARD_REMOVE 0x00000008 42 + #define TMIO_STAT_CARD_INSERT 0x00000010 43 + #define TMIO_STAT_SIGSTATE 0x00000020 44 + #define TMIO_STAT_WRPROTECT 0x00000080 45 + #define TMIO_STAT_CARD_REMOVE_A 0x00000100 46 + #define TMIO_STAT_CARD_INSERT_A 0x00000200 47 + #define TMIO_STAT_SIGSTATE_A 0x00000400 48 + #define TMIO_STAT_CMD_IDX_ERR 0x00010000 49 + #define TMIO_STAT_CRCFAIL 0x00020000 50 + #define TMIO_STAT_STOPBIT_ERR 0x00040000 51 + #define TMIO_STAT_DATATIMEOUT 0x00080000 52 + #define TMIO_STAT_RXOVERFLOW 0x00100000 53 + #define TMIO_STAT_TXUNDERRUN 0x00200000 54 + #define TMIO_STAT_CMDTIMEOUT 0x00400000 55 + #define TMIO_STAT_RXRDY 0x01000000 56 + #define TMIO_STAT_TXRQ 0x02000000 57 + #define TMIO_STAT_ILL_FUNC 0x20000000 58 + #define TMIO_STAT_CMD_BUSY 0x40000000 59 + #define TMIO_STAT_ILL_ACCESS 0x80000000 60 + 61 + #define TMIO_BBS 512 /* Boot block size */ 62 + 63 + #endif /* _LINUX_MMC_TMIO_H_ */